Repository: envoyproxy/envoy-wasm Branch: master Commit: ab5d9381fdf9 Files: 7386 Total size: 35.1 MB Directory structure: gitextract_0bqlinc2/ ├── .azure-pipelines/ │ ├── bazel.yml │ ├── cleanup.sh │ └── pipelines.yml ├── .bazelci/ │ └── presubmit.yml ├── .bazelignore ├── .bazelrc ├── .bazelversion ├── .circleci/ │ └── config.yml ├── .clang-format ├── .clang-tidy ├── .devcontainer/ │ ├── .gitignore │ ├── Dockerfile │ ├── README.md │ ├── devcontainer.json │ └── setup.sh ├── .gitattributes ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── config.yml │ │ ├── feature_request.md │ │ ├── non--crash-security--bug.md │ │ └── other.md │ ├── stale.yml │ └── workflows/ │ ├── codeql-daily.yml │ ├── codeql-push.yml │ └── get_build_targets.sh ├── .gitignore ├── .zuul/ │ └── playbooks/ │ └── envoy-build/ │ └── run.yaml ├── .zuul.yaml ├── BUILD ├── CODEOWNERS ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── DCO ├── DEPENDENCY_POLICY.md ├── DEPRECATED.md ├── DEVELOPER.md ├── EXTENSION_POLICY.md ├── GOVERNANCE.md ├── LICENSE ├── NOTICE ├── OWNERS.md ├── PULL_REQUESTS.md ├── PULL_REQUEST_TEMPLATE.md ├── README.md ├── RELEASES.md ├── REPO_LAYOUT.md ├── SECURITY.md ├── STYLE.md ├── VERSION ├── WORKSPACE ├── api/ │ ├── API_OVERVIEW.md │ ├── API_VERSIONING.md │ ├── BUILD │ ├── CONTRIBUTING.md │ ├── README.md │ ├── STYLE.md │ ├── bazel/ │ │ ├── BUILD │ │ ├── api_build_system.bzl │ │ ├── envoy_http_archive.bzl │ │ ├── external_proto_deps.bzl │ │ ├── repositories.bzl │ │ └── repository_locations.bzl │ ├── envoy/ │ │ ├── admin/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── certs.proto │ │ │ │ ├── clusters.proto │ │ │ │ ├── config_dump.proto │ │ │ │ ├── listeners.proto │ │ │ │ ├── memory.proto │ │ │ │ ├── metrics.proto │ │ │ │ ├── mutex_stats.proto │ │ │ │ ├── server_info.proto │ │ │ │ └── tap.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── certs.proto │ │ │ │ ├── clusters.proto │ │ │ │ ├── config_dump.proto │ │ │ │ ├── init_dump.proto │ │ │ │ ├── listeners.proto │ │ │ │ ├── memory.proto │ │ │ │ 
├── metrics.proto │ │ │ │ ├── mutex_stats.proto │ │ │ │ ├── server_info.proto │ │ │ │ └── tap.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── certs.proto │ │ │ ├── clusters.proto │ │ │ ├── config_dump.proto │ │ │ ├── init_dump.proto │ │ │ ├── listeners.proto │ │ │ ├── memory.proto │ │ │ ├── metrics.proto │ │ │ ├── mutex_stats.proto │ │ │ ├── server_info.proto │ │ │ └── tap.proto │ │ ├── annotations/ │ │ │ ├── BUILD │ │ │ ├── deprecation.proto │ │ │ └── resource.proto │ │ ├── api/ │ │ │ └── v2/ │ │ │ ├── BUILD │ │ │ ├── README.md │ │ │ ├── auth/ │ │ │ │ ├── BUILD │ │ │ │ ├── cert.proto │ │ │ │ ├── common.proto │ │ │ │ ├── secret.proto │ │ │ │ └── tls.proto │ │ │ ├── cds.proto │ │ │ ├── cluster/ │ │ │ │ ├── BUILD │ │ │ │ ├── circuit_breaker.proto │ │ │ │ ├── filter.proto │ │ │ │ └── outlier_detection.proto │ │ │ ├── cluster.proto │ │ │ ├── core/ │ │ │ │ ├── BUILD │ │ │ │ ├── address.proto │ │ │ │ ├── backoff.proto │ │ │ │ ├── base.proto │ │ │ │ ├── config_source.proto │ │ │ │ ├── event_service_config.proto │ │ │ │ ├── grpc_method_list.proto │ │ │ │ ├── grpc_service.proto │ │ │ │ ├── health_check.proto │ │ │ │ ├── http_uri.proto │ │ │ │ ├── protocol.proto │ │ │ │ └── socket_option.proto │ │ │ ├── discovery.proto │ │ │ ├── eds.proto │ │ │ ├── endpoint/ │ │ │ │ ├── BUILD │ │ │ │ ├── endpoint.proto │ │ │ │ ├── endpoint_components.proto │ │ │ │ └── load_report.proto │ │ │ ├── endpoint.proto │ │ │ ├── lds.proto │ │ │ ├── listener/ │ │ │ │ ├── BUILD │ │ │ │ ├── listener.proto │ │ │ │ ├── listener_components.proto │ │ │ │ ├── quic_config.proto │ │ │ │ └── udp_listener_config.proto │ │ │ ├── listener.proto │ │ │ ├── ratelimit/ │ │ │ │ ├── BUILD │ │ │ │ └── ratelimit.proto │ │ │ ├── rds.proto │ │ │ ├── route/ │ │ │ │ ├── BUILD │ │ │ │ ├── route.proto │ │ │ │ └── route_components.proto │ │ │ ├── route.proto │ │ │ ├── scoped_route.proto │ │ │ └── srds.proto │ │ ├── config/ │ │ │ ├── README.md │ │ │ ├── accesslog/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── als.proto │ │ 
│ │ │ └── file.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── accesslog.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── accesslog.proto │ │ │ ├── bootstrap/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── bootstrap.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── bootstrap.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── bootstrap.proto │ │ │ ├── cluster/ │ │ │ │ ├── aggregate/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── cluster.proto │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── cluster.proto │ │ │ │ ├── redis/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── redis_cluster.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── circuit_breaker.proto │ │ │ │ │ ├── cluster.proto │ │ │ │ │ ├── filter.proto │ │ │ │ │ └── outlier_detection.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── circuit_breaker.proto │ │ │ │ ├── cluster.proto │ │ │ │ ├── filter.proto │ │ │ │ └── outlier_detection.proto │ │ │ ├── common/ │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dns_cache.proto │ │ │ │ ├── matcher/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── matcher.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── matcher.proto │ │ │ │ └── tap/ │ │ │ │ └── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── common.proto │ │ │ ├── core/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── address.proto │ │ │ │ │ ├── backoff.proto │ │ │ │ │ ├── base.proto │ │ │ │ │ ├── config_source.proto │ │ │ │ │ ├── event_service_config.proto │ │ │ │ │ ├── extension.proto │ │ │ │ │ ├── grpc_method_list.proto │ │ │ │ │ ├── grpc_service.proto │ │ │ │ │ ├── health_check.proto │ │ │ │ │ ├── http_uri.proto │ │ │ │ │ ├── protocol.proto │ │ │ │ │ ├── proxy_protocol.proto │ │ │ │ │ ├── socket_option.proto │ │ │ │ │ └── substitution_format_string.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── address.proto │ │ │ │ ├── backoff.proto │ │ │ │ ├── base.proto │ │ │ │ 
├── config_source.proto │ │ │ │ ├── event_service_config.proto │ │ │ │ ├── extension.proto │ │ │ │ ├── grpc_method_list.proto │ │ │ │ ├── grpc_service.proto │ │ │ │ ├── health_check.proto │ │ │ │ ├── http_uri.proto │ │ │ │ ├── protocol.proto │ │ │ │ ├── proxy_protocol.proto │ │ │ │ ├── socket_option.proto │ │ │ │ └── substitution_format_string.proto │ │ │ ├── endpoint/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── endpoint.proto │ │ │ │ ├── endpoint_components.proto │ │ │ │ └── load_report.proto │ │ │ ├── filter/ │ │ │ │ ├── README.md │ │ │ │ ├── accesslog/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── accesslog.proto │ │ │ │ ├── dubbo/ │ │ │ │ │ └── router/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── router.proto │ │ │ │ ├── fault/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── fault.proto │ │ │ │ ├── http/ │ │ │ │ │ ├── adaptive_concurrency/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── adaptive_concurrency.proto │ │ │ │ │ ├── aws_lambda/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── aws_lambda.proto │ │ │ │ │ ├── aws_request_signing/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── aws_request_signing.proto │ │ │ │ │ ├── buffer/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── buffer.proto │ │ │ │ │ ├── cache/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── cache.proto │ │ │ │ │ ├── compressor/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── compressor.proto │ │ │ │ │ ├── cors/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── cors.proto │ │ │ │ │ ├── csrf/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── csrf.proto │ │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── dynamic_forward_proxy.proto │ │ │ │ │ ├── dynamo/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── dynamo.proto │ │ │ │ │ ├── ext_authz/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── ext_authz.proto │ │ │ │ │ 
├── fault/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── fault.proto │ │ │ │ │ ├── grpc_http1_bridge/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── grpc_http1_reverse_bridge/ │ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── grpc_stats/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── grpc_web/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── grpc_web.proto │ │ │ │ │ ├── gzip/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── gzip.proto │ │ │ │ │ ├── header_to_metadata/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── header_to_metadata.proto │ │ │ │ │ ├── health_check/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── health_check.proto │ │ │ │ │ ├── ip_tagging/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── ip_tagging.proto │ │ │ │ │ ├── jwt_authn/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── lua/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── lua.proto │ │ │ │ │ ├── on_demand/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── on_demand.proto │ │ │ │ │ ├── original_src/ │ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── original_src.proto │ │ │ │ │ ├── rate_limit/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rate_limit.proto │ │ │ │ │ ├── rbac/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rbac.proto │ │ │ │ │ ├── router/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── router.proto │ │ │ │ │ ├── squash/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── squash.proto │ │ │ │ │ ├── tap/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── tap.proto │ │ │ │ │ └── transcoder/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── transcoder.proto │ │ │ │ ├── listener/ │ │ │ │ │ ├── http_inspector/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ 
│ │ │ │ └── http_inspector.proto │ │ │ │ │ ├── original_dst/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── original_dst.proto │ │ │ │ │ ├── original_src/ │ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── original_src.proto │ │ │ │ │ ├── proxy_protocol/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── proxy_protocol.proto │ │ │ │ │ └── tls_inspector/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tls_inspector.proto │ │ │ │ ├── network/ │ │ │ │ │ ├── client_ssl_auth/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── client_ssl_auth.proto │ │ │ │ │ ├── direct_response/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── dubbo_proxy/ │ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ ├── dubbo_proxy.proto │ │ │ │ │ │ └── route.proto │ │ │ │ │ ├── echo/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── echo.proto │ │ │ │ │ ├── ext_authz/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── ext_authz.proto │ │ │ │ │ ├── http_connection_manager/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── http_connection_manager.proto │ │ │ │ │ ├── kafka_broker/ │ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── kafka_broker.proto │ │ │ │ │ ├── local_rate_limit/ │ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── local_rate_limit.proto │ │ │ │ │ ├── mongo_proxy/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── mongo_proxy.proto │ │ │ │ │ ├── mysql_proxy/ │ │ │ │ │ │ └── v1alpha1/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── mysql_proxy.proto │ │ │ │ │ ├── rate_limit/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rate_limit.proto │ │ │ │ │ ├── rbac/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rbac.proto │ │ │ │ │ ├── redis_proxy/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── redis_proxy.proto │ │ │ │ │ ├── sni_cluster/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── 
sni_cluster.proto │ │ │ │ │ ├── tcp_proxy/ │ │ │ │ │ │ └── v2/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── tcp_proxy.proto │ │ │ │ │ ├── thrift_proxy/ │ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ ├── route.proto │ │ │ │ │ │ └── thrift_proxy.proto │ │ │ │ │ └── zookeeper_proxy/ │ │ │ │ │ └── v1alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── zookeeper_proxy.proto │ │ │ │ ├── thrift/ │ │ │ │ │ ├── rate_limit/ │ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rate_limit.proto │ │ │ │ │ └── router/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── router.proto │ │ │ │ └── udp/ │ │ │ │ └── udp_proxy/ │ │ │ │ └── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── udp_proxy.proto │ │ │ ├── grpc_credential/ │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── aws_iam.proto │ │ │ │ │ └── file_based_metadata.proto │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── aws_iam.proto │ │ │ │ └── file_based_metadata.proto │ │ │ ├── health_checker/ │ │ │ │ └── redis/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── redis.proto │ │ │ ├── listener/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── api_listener.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── api_listener.proto │ │ │ │ │ ├── listener.proto │ │ │ │ │ ├── listener_components.proto │ │ │ │ │ ├── quic_config.proto │ │ │ │ │ ├── udp_default_writer_config.proto │ │ │ │ │ ├── udp_gso_batch_writer_config.proto │ │ │ │ │ └── udp_listener_config.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── api_listener.proto │ │ │ │ ├── listener.proto │ │ │ │ ├── listener_components.proto │ │ │ │ ├── quic_config.proto │ │ │ │ ├── udp_default_writer_config.proto │ │ │ │ ├── udp_gso_batch_writer_config.proto │ │ │ │ └── udp_listener_config.proto │ │ │ ├── metrics/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── metrics_service.proto │ │ │ │ │ └── stats.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── metrics_service.proto │ │ │ │ │ └── stats.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ 
│ │ │ ├── metrics_service.proto │ │ │ │ └── stats.proto │ │ │ ├── overload/ │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── overload.proto │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── overload.proto │ │ │ ├── ratelimit/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rls.proto │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── rls.proto │ │ │ ├── rbac/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rbac.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rbac.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── rbac.proto │ │ │ ├── resource_monitor/ │ │ │ │ ├── fixed_heap/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── fixed_heap.proto │ │ │ │ └── injected_resource/ │ │ │ │ └── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── injected_resource.proto │ │ │ ├── retry/ │ │ │ │ ├── omit_canary_hosts/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── omit_canary_hosts.proto │ │ │ │ ├── omit_host_metadata/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── omit_host_metadata_config.proto │ │ │ │ ├── previous_hosts/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── previous_hosts.proto │ │ │ │ └── previous_priorities/ │ │ │ │ ├── BUILD │ │ │ │ └── previous_priorities_config.proto │ │ │ ├── route/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── route.proto │ │ │ │ │ ├── route_components.proto │ │ │ │ │ └── scoped_route.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── route.proto │ │ │ │ ├── route_components.proto │ │ │ │ └── scoped_route.proto │ │ │ ├── tap/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── common.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── common.proto │ │ │ ├── trace/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── datadog.proto │ │ │ │ │ ├── dynamic_ot.proto │ │ │ │ │ ├── http_tracer.proto │ │ │ │ │ ├── lightstep.proto │ │ │ │ │ ├── opencensus.proto │ │ │ │ │ ├── service.proto │ │ │ │ │ ├── trace.proto │ │ │ │ │ └── zipkin.proto │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── xray.proto │ │ │ │ ├── 
v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── datadog.proto │ │ │ │ │ ├── dynamic_ot.proto │ │ │ │ │ ├── http_tracer.proto │ │ │ │ │ ├── lightstep.proto │ │ │ │ │ ├── opencensus.proto │ │ │ │ │ ├── service.proto │ │ │ │ │ ├── trace.proto │ │ │ │ │ ├── xray.proto │ │ │ │ │ └── zipkin.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── http_tracer.proto │ │ │ │ └── service.proto │ │ │ └── transport_socket/ │ │ │ ├── alts/ │ │ │ │ └── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── alts.proto │ │ │ ├── raw_buffer/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── raw_buffer.proto │ │ │ └── tap/ │ │ │ └── v2alpha/ │ │ │ ├── BUILD │ │ │ └── tap.proto │ │ ├── data/ │ │ │ ├── accesslog/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── accesslog.proto │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── accesslog.proto │ │ │ ├── cluster/ │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── outlier_detection_event.proto │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── outlier_detection_event.proto │ │ │ ├── core/ │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── health_check_event.proto │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── health_check_event.proto │ │ │ ├── dns/ │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dns_table.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dns_table.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── dns_table.proto │ │ │ └── tap/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── common.proto │ │ │ │ ├── http.proto │ │ │ │ ├── transport.proto │ │ │ │ └── wrapper.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ ├── common.proto │ │ │ ├── http.proto │ │ │ ├── transport.proto │ │ │ └── wrapper.proto │ │ ├── extensions/ │ │ │ ├── access_loggers/ │ │ │ │ ├── file/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── file.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── file.proto │ │ │ │ ├── grpc/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── als.proto │ │ │ │ └── wasm/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── wasm.proto 
│ │ │ ├── clusters/ │ │ │ │ ├── aggregate/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── cluster.proto │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── cluster.proto │ │ │ │ └── redis/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── redis_cluster.proto │ │ │ ├── common/ │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dns_cache.proto │ │ │ │ ├── ratelimit/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── ratelimit.proto │ │ │ │ └── tap/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── common.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── common.proto │ │ │ ├── compression/ │ │ │ │ └── gzip/ │ │ │ │ ├── compressor/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── gzip.proto │ │ │ │ └── decompressor/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── gzip.proto │ │ │ ├── filters/ │ │ │ │ ├── common/ │ │ │ │ │ └── fault/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── fault.proto │ │ │ │ ├── http/ │ │ │ │ │ ├── adaptive_concurrency/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── adaptive_concurrency.proto │ │ │ │ │ ├── admission_control/ │ │ │ │ │ │ └── v3alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── admission_control.proto │ │ │ │ │ ├── aws_lambda/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── aws_lambda.proto │ │ │ │ │ ├── aws_request_signing/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── aws_request_signing.proto │ │ │ │ │ ├── buffer/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── buffer.proto │ │ │ │ │ ├── cache/ │ │ │ │ │ │ ├── v3alpha/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── cache.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── cache.proto │ │ │ │ │ ├── cdn_loop/ │ │ │ │ │ │ └── v3alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── cdn_loop.proto │ │ │ │ │ ├── compressor/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── compressor.proto │ │ │ │ │ ├── cors/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ 
│ │ │ │ │ └── cors.proto │ │ │ │ │ ├── csrf/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── csrf.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── csrf.proto │ │ │ │ │ ├── decompressor/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── decompressor.proto │ │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── dynamic_forward_proxy.proto │ │ │ │ │ ├── dynamo/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── dynamo.proto │ │ │ │ │ ├── ext_authz/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── ext_authz.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── ext_authz.proto │ │ │ │ │ ├── fault/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── fault.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── fault.proto │ │ │ │ │ ├── grpc_http1_bridge/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── grpc_http1_reverse_bridge/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── grpc_json_transcoder/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── transcoder.proto │ │ │ │ │ ├── grpc_stats/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── grpc_web/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── grpc_web.proto │ │ │ │ │ ├── gzip/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── gzip.proto │ │ │ │ │ ├── header_to_metadata/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── header_to_metadata.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── header_to_metadata.proto │ │ │ │ │ ├── health_check/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── health_check.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── health_check.proto │ │ │ │ │ ├── ip_tagging/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── ip_tagging.proto │ │ │ │ │ ├── 
jwt_authn/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ │ └── config.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── local_rate_limit.proto │ │ │ │ │ ├── lua/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── lua.proto │ │ │ │ │ ├── oauth2/ │ │ │ │ │ │ ├── v3alpha/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── oauth.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── oauth.proto │ │ │ │ │ ├── on_demand/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── on_demand.proto │ │ │ │ │ ├── original_src/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── original_src.proto │ │ │ │ │ ├── ratelimit/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rate_limit.proto │ │ │ │ │ ├── rbac/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── rbac.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rbac.proto │ │ │ │ │ ├── router/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── router.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── router.proto │ │ │ │ │ ├── squash/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── squash.proto │ │ │ │ │ ├── tap/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── tap.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── tap.proto │ │ │ │ │ └── wasm/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── wasm.proto │ │ │ │ ├── listener/ │ │ │ │ │ ├── http_inspector/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── http_inspector.proto │ │ │ │ │ ├── original_dst/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── original_dst.proto │ │ │ │ │ ├── original_src/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── original_src.proto │ │ │ │ │ ├── proxy_protocol/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── proxy_protocol.proto 
│ │ │ │ │ └── tls_inspector/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tls_inspector.proto │ │ │ │ ├── network/ │ │ │ │ │ ├── client_ssl_auth/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── client_ssl_auth.proto │ │ │ │ │ ├── direct_response/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ ├── dubbo_proxy/ │ │ │ │ │ │ ├── router/ │ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── router.proto │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ │ ├── dubbo_proxy.proto │ │ │ │ │ │ │ └── route.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── dubbo_proxy.proto │ │ │ │ │ │ └── route.proto │ │ │ │ │ ├── echo/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── echo.proto │ │ │ │ │ ├── ext_authz/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── ext_authz.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── ext_authz.proto │ │ │ │ │ ├── http_connection_manager/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── http_connection_manager.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── http_connection_manager.proto │ │ │ │ │ ├── kafka_broker/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── kafka_broker.proto │ │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── local_rate_limit.proto │ │ │ │ │ ├── mongo_proxy/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── mongo_proxy.proto │ │ │ │ │ ├── mysql_proxy/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── mysql_proxy.proto │ │ │ │ │ ├── postgres_proxy/ │ │ │ │ │ │ └── v3alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── postgres_proxy.proto │ │ │ │ │ ├── ratelimit/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rate_limit.proto │ │ │ │ │ ├── rbac/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── rbac.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── 
rbac.proto │ │ │ │ │ ├── redis_proxy/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── redis_proxy.proto │ │ │ │ │ ├── rocketmq_proxy/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ │ ├── rocketmq_proxy.proto │ │ │ │ │ │ │ └── route.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── rocketmq_proxy.proto │ │ │ │ │ │ └── route.proto │ │ │ │ │ ├── sni_cluster/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── sni_cluster.proto │ │ │ │ │ ├── sni_dynamic_forward_proxy/ │ │ │ │ │ │ └── v3alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── sni_dynamic_forward_proxy.proto │ │ │ │ │ ├── tcp_proxy/ │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── tcp_proxy.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── tcp_proxy.proto │ │ │ │ │ ├── thrift_proxy/ │ │ │ │ │ │ ├── filters/ │ │ │ │ │ │ │ └── ratelimit/ │ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ └── rate_limit.proto │ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ │ ├── route.proto │ │ │ │ │ │ │ └── thrift_proxy.proto │ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── route.proto │ │ │ │ │ │ └── thrift_proxy.proto │ │ │ │ │ ├── wasm/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── wasm.proto │ │ │ │ │ └── zookeeper_proxy/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── zookeeper_proxy.proto │ │ │ │ └── udp/ │ │ │ │ ├── dns_filter/ │ │ │ │ │ ├── v3alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── dns_filter.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dns_filter.proto │ │ │ │ └── udp_proxy/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── udp_proxy.proto │ │ │ ├── internal_redirect/ │ │ │ │ ├── allow_listed_routes/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── allow_listed_routes_config.proto │ │ │ │ ├── previous_routes/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── previous_routes_config.proto │ │ │ │ └── safe_cross_scheme/ │ │ │ │ 
└── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── safe_cross_scheme_config.proto │ │ │ ├── network/ │ │ │ │ └── socket_interface/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── default_socket_interface.proto │ │ │ ├── retry/ │ │ │ │ ├── host/ │ │ │ │ │ └── omit_host_metadata/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── omit_host_metadata_config.proto │ │ │ │ └── priority/ │ │ │ │ └── previous_priorities/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── previous_priorities_config.proto │ │ │ ├── stat_sinks/ │ │ │ │ └── wasm/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── wasm.proto │ │ │ ├── tracers/ │ │ │ │ ├── datadog/ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── datadog.proto │ │ │ │ ├── dynamic_ot/ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dynamic_ot.proto │ │ │ │ ├── lightstep/ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── lightstep.proto │ │ │ │ ├── opencensus/ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── opencensus.proto │ │ │ │ ├── xray/ │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── xray.proto │ │ │ │ └── zipkin/ │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── zipkin.proto │ │ │ ├── transport_sockets/ │ │ │ │ ├── alts/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── alts.proto │ │ │ │ ├── proxy_protocol/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── upstream_proxy_protocol.proto │ │ │ │ ├── quic/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── quic_transport.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── quic_transport.proto │ │ │ │ ├── raw_buffer/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── raw_buffer.proto │ │ │ │ ├── tap/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── tap.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tap.proto │ │ │ │ └── tls/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── cert.proto │ │ │ │ │ ├── common.proto │ │ │ │ │ ├── secret.proto │ │ │ │ │ └── tls.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── 
common.proto │ │ │ │ ├── secret.proto │ │ │ │ └── tls.proto │ │ │ ├── upstreams/ │ │ │ │ └── http/ │ │ │ │ ├── generic/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── generic_connection_pool.proto │ │ │ │ ├── http/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── http_connection_pool.proto │ │ │ │ └── tcp/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── tcp_connection_pool.proto │ │ │ ├── wasm/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── wasm.proto │ │ │ └── watchdog/ │ │ │ ├── abort_action/ │ │ │ │ └── v3alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── abort_action.proto │ │ │ └── profile_action/ │ │ │ └── v3alpha/ │ │ │ ├── BUILD │ │ │ └── profile_action.proto │ │ ├── service/ │ │ │ ├── README.md │ │ │ ├── accesslog/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── als.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── als.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── als.proto │ │ │ ├── auth/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── attribute_context.proto │ │ │ │ │ └── external_auth.proto │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── external_auth.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── attribute_context.proto │ │ │ │ │ └── external_auth.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── attribute_context.proto │ │ │ │ └── external_auth.proto │ │ │ ├── cluster/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── cds.proto │ │ │ ├── discovery/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── ads.proto │ │ │ │ │ ├── hds.proto │ │ │ │ │ ├── rtds.proto │ │ │ │ │ └── sds.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── ads.proto │ │ │ │ │ └── discovery.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── ads.proto │ │ │ │ └── discovery.proto │ │ │ ├── endpoint/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── eds.proto │ │ │ ├── event_reporting/ │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── event_reporting_service.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── event_reporting_service.proto │ │ 
│ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── event_reporting_service.proto │ │ │ ├── extension/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── config_discovery.proto │ │ │ ├── health/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── hds.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── hds.proto │ │ │ ├── listener/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── lds.proto │ │ │ ├── load_stats/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── lrs.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── lrs.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── lrs.proto │ │ │ ├── metrics/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── metrics_service.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── metrics_service.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── metrics_service.proto │ │ │ ├── ratelimit/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rls.proto │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── rls.proto │ │ │ ├── route/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── rds.proto │ │ │ │ └── srds.proto │ │ │ ├── runtime/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── rtds.proto │ │ │ ├── secret/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── sds.proto │ │ │ ├── status/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── csds.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── csds.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── csds.proto │ │ │ ├── tap/ │ │ │ │ ├── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── common.proto │ │ │ │ │ └── tap.proto │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tap.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── tap.proto │ │ │ └── trace/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── trace_service.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── trace_service.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── trace_service.proto │ │ └── type/ │ │ ├── BUILD │ │ ├── hash_policy.proto │ │ ├── http.proto │ │ ├── http_status.proto │ │ ├── matcher/ │ │ │ ├── BUILD │ │ │ ├── 
metadata.proto │ │ │ ├── node.proto │ │ │ ├── number.proto │ │ │ ├── path.proto │ │ │ ├── regex.proto │ │ │ ├── string.proto │ │ │ ├── struct.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── metadata.proto │ │ │ │ ├── node.proto │ │ │ │ ├── number.proto │ │ │ │ ├── path.proto │ │ │ │ ├── regex.proto │ │ │ │ ├── string.proto │ │ │ │ ├── struct.proto │ │ │ │ └── value.proto │ │ │ ├── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── metadata.proto │ │ │ │ ├── node.proto │ │ │ │ ├── number.proto │ │ │ │ ├── path.proto │ │ │ │ ├── regex.proto │ │ │ │ ├── string.proto │ │ │ │ ├── struct.proto │ │ │ │ └── value.proto │ │ │ └── value.proto │ │ ├── metadata/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── metadata.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── metadata.proto │ │ ├── percent.proto │ │ ├── range.proto │ │ ├── semantic_version.proto │ │ ├── token_bucket.proto │ │ ├── tracing/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── custom_tag.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── custom_tag.proto │ │ └── v3/ │ │ ├── BUILD │ │ ├── hash_policy.proto │ │ ├── http.proto │ │ ├── http_status.proto │ │ ├── percent.proto │ │ ├── range.proto │ │ ├── ratelimit_unit.proto │ │ ├── semantic_version.proto │ │ └── token_bucket.proto │ ├── examples/ │ │ └── service_envoy/ │ │ ├── BUILD │ │ ├── http_connection_manager.pb │ │ └── listeners.pb │ ├── test/ │ │ ├── build/ │ │ │ ├── BUILD │ │ │ ├── build_test.cc │ │ │ └── go_build_test.go │ │ └── validate/ │ │ ├── BUILD │ │ └── pgv_test.cc │ ├── tools/ │ │ ├── BUILD │ │ ├── data/ │ │ │ ├── tap2pcap_h2_ipv4.pb_text │ │ │ └── tap2pcap_h2_ipv4.txt │ │ ├── generate_listeners.py │ │ ├── generate_listeners_test.py │ │ ├── tap2pcap.py │ │ └── tap2pcap_test.py │ ├── versioning/ │ │ └── BUILD │ └── xds_protocol.rst ├── bazel/ │ ├── BUILD │ ├── DEVELOPER.md │ ├── EXTERNAL_DEPS.md │ ├── PPROF.md │ ├── README.md │ ├── antlr.patch │ ├── api_binding.bzl │ ├── api_repositories.bzl │ ├── boringssl_static.patch │ ├── coverage/ │ │ ├── BUILD │ │ ├── 
collect_cc_coverage.sh │ │ └── fuzz_coverage_wrapper.sh │ ├── crates.bzl │ ├── dependency_imports.bzl │ ├── dev_binding.bzl │ ├── envoy_binary.bzl │ ├── envoy_build_system.bzl │ ├── envoy_internal.bzl │ ├── envoy_library.bzl │ ├── envoy_select.bzl │ ├── envoy_test.bzl │ ├── external/ │ │ ├── BUILD │ │ ├── boringssl_fips.BUILD │ │ ├── boringssl_fips.genrule_cmd │ │ ├── boringssl_fips.patch │ │ ├── cargo/ │ │ │ ├── BUILD │ │ │ └── remote/ │ │ │ ├── BUILD │ │ │ ├── ahash-0.3.8.BUILD │ │ │ ├── autocfg-1.0.0.BUILD │ │ │ ├── cfg-if-0.1.10.BUILD │ │ │ ├── hashbrown-0.7.2.BUILD │ │ │ ├── libc-0.2.74.BUILD │ │ │ ├── log-0.4.11.BUILD │ │ │ ├── memory_units-0.4.0.BUILD │ │ │ ├── proxy-wasm-0.1.2.BUILD │ │ │ ├── wee_alloc-0.4.5.BUILD │ │ │ ├── winapi-0.3.9.BUILD │ │ │ ├── winapi-i686-pc-windows-gnu-0.4.0.BUILD │ │ │ └── winapi-x86_64-pc-windows-gnu-0.4.0.BUILD │ │ ├── compiler_rt.BUILD │ │ ├── fmtlib.BUILD │ │ ├── http-parser.BUILD │ │ ├── kafka_int32.patch │ │ ├── libcircllhist.BUILD │ │ ├── libprotobuf_mutator.BUILD │ │ ├── proxy_wasm_cpp_host.BUILD │ │ ├── quiche.BUILD │ │ ├── quiche.genrule_cmd │ │ ├── rapidjson.BUILD │ │ ├── spdlog.BUILD │ │ ├── sqlparser.BUILD │ │ ├── tclap.BUILD │ │ ├── twitter_common_finagle_thrift.BUILD │ │ ├── twitter_common_lang.BUILD │ │ ├── twitter_common_rpc.BUILD │ │ ├── wee8.BUILD │ │ ├── wee8.genrule_cmd │ │ ├── wee8.patch │ │ └── xxhash.BUILD │ ├── foreign_cc/ │ │ ├── BUILD │ │ ├── llvm.patch │ │ ├── luajit.patch │ │ ├── moonjit.patch │ │ ├── nghttp2.patch │ │ └── zlib.patch │ ├── gen_sh_test_runner.sh │ ├── genrule_repository.bzl │ ├── get_workspace_status │ ├── io_opentracing_cpp.patch │ ├── protobuf.patch │ ├── repositories.bzl │ ├── repositories_extra.bzl │ ├── repository_locations.bzl │ ├── rules_go.patch │ ├── setup_clang.sh │ ├── setup_local_tsan.sh │ ├── sh_test_wrapper.sh │ ├── tclap-win64-ull-sizet.patch │ ├── test/ │ │ ├── BUILD │ │ └── verify_tap_test.sh │ ├── test_for_benchmark_wrapper.sh │ ├── toolchains/ │ │ └── BUILD │ ├── 
upb.patch │ └── wasm/ │ ├── BUILD │ └── wasm.bzl ├── ci/ │ ├── Dockerfile-envoy │ ├── Dockerfile-envoy-alpine │ ├── Dockerfile-envoy-google-vrp │ ├── README.md │ ├── WORKSPACE.filter.example │ ├── api_mirror.sh │ ├── build_setup.sh │ ├── check_and_fix_format.sh │ ├── do_ci.sh │ ├── do_circle_ci.sh │ ├── do_coverity_local.sh │ ├── docker-entrypoint.sh │ ├── docker_ci.sh │ ├── docker_rebuild_google-vrp.sh │ ├── envoy_build_sha.sh │ ├── filter_example_mirror.sh │ ├── filter_example_setup.sh │ ├── flaky_test/ │ │ ├── process_xml.py │ │ ├── requirements.txt │ │ ├── run_process_xml.sh │ │ └── run_process_xml_mac.sh │ ├── go_mirror.sh │ ├── mac_ci_setup.sh │ ├── mac_ci_steps.sh │ ├── repokitteh/ │ │ └── modules/ │ │ ├── azure_pipelines.star │ │ └── ownerscheck.star │ ├── run_clang_tidy.sh │ ├── run_envoy_docker.sh │ ├── setup_cache.sh │ ├── upload_gcs_artifact.sh │ ├── verify_examples.sh │ └── windows_ci_steps.sh ├── configs/ │ ├── BUILD │ ├── Dockerfile │ ├── access_log_format_helper_v2.template.yaml │ ├── configgen.py │ ├── configgen.sh │ ├── encapsulate_in_connect.v3.yaml │ ├── envoy_double_proxy_v2.template.yaml │ ├── envoy_front_proxy_v2.template.yaml │ ├── envoy_router_v2.template.yaml │ ├── envoy_service_to_service_v2.template.yaml │ ├── freebind/ │ │ ├── README.md │ │ └── freebind.yaml │ ├── google-vrp/ │ │ ├── envoy-edge.yaml │ │ ├── envoy-origin.yaml │ │ ├── launch_envoy.sh │ │ └── supervisor.conf │ ├── google_com_proxy.v2.yaml │ ├── original-dst-cluster/ │ │ ├── README.md │ │ ├── netns_cleanup.sh │ │ ├── netns_setup.sh │ │ └── proxy_config.yaml │ ├── requirements.txt │ ├── routing_helper_v2.template.yaml │ ├── terminate_connect.v3.yaml │ └── using_deprecated_config.v2.yaml ├── docs/ │ ├── BUILD │ ├── README.md │ ├── _ext/ │ │ └── validating_code_block.py │ ├── build.sh │ ├── conf.py │ ├── empty_extensions.json │ ├── generate_extension_db.py │ ├── generate_extension_rst.py │ ├── generate_external_dep_rst.py │ ├── protodoc_manifest.yaml │ ├── publish.sh │ ├── 
requirements.txt │ └── root/ │ ├── _static/ │ │ ├── css/ │ │ │ └── envoy.css │ │ ├── placeholder │ │ └── searchtools.js │ ├── about_docs.rst │ ├── api/ │ │ ├── api.rst │ │ ├── api_supported_versions.rst │ │ └── client_features.rst │ ├── api-v2/ │ │ ├── admin/ │ │ │ └── admin.rst │ │ ├── api.rst │ │ ├── bootstrap/ │ │ │ └── bootstrap.rst │ │ ├── clusters/ │ │ │ └── clusters.rst │ │ ├── common_messages/ │ │ │ └── common_messages.rst │ │ ├── config/ │ │ │ ├── accesslog/ │ │ │ │ └── accesslog.rst │ │ │ ├── cluster/ │ │ │ │ └── cluster.rst │ │ │ ├── common/ │ │ │ │ └── common.rst │ │ │ ├── config.rst │ │ │ ├── filter/ │ │ │ │ ├── dubbo/ │ │ │ │ │ └── dubbo.rst │ │ │ │ ├── filter.rst │ │ │ │ ├── http/ │ │ │ │ │ └── http.rst │ │ │ │ ├── listener/ │ │ │ │ │ └── listener.rst │ │ │ │ ├── network/ │ │ │ │ │ └── network.rst │ │ │ │ ├── thrift/ │ │ │ │ │ └── thrift.rst │ │ │ │ └── udp/ │ │ │ │ └── udp.rst │ │ │ ├── grpc_credential/ │ │ │ │ └── grpc_credential.rst │ │ │ ├── health_checker/ │ │ │ │ └── health_checker.rst │ │ │ ├── listener/ │ │ │ │ └── listener.rst │ │ │ ├── rbac/ │ │ │ │ └── rbac.rst │ │ │ ├── resource_monitor/ │ │ │ │ └── resource_monitor.rst │ │ │ ├── retry/ │ │ │ │ └── retry.rst │ │ │ ├── trace/ │ │ │ │ └── trace.rst │ │ │ └── transport_socket/ │ │ │ └── transport_socket.rst │ │ ├── data/ │ │ │ ├── accesslog/ │ │ │ │ └── accesslog.rst │ │ │ ├── cluster/ │ │ │ │ └── cluster.rst │ │ │ ├── core/ │ │ │ │ └── core.rst │ │ │ ├── data.rst │ │ │ ├── dns/ │ │ │ │ └── dns.rst │ │ │ └── tap/ │ │ │ └── tap.rst │ │ ├── http_routes/ │ │ │ └── http_routes.rst │ │ ├── listeners/ │ │ │ └── listeners.rst │ │ ├── service/ │ │ │ └── service.rst │ │ └── types/ │ │ └── types.rst │ ├── api-v3/ │ │ ├── admin/ │ │ │ └── admin.rst │ │ ├── api.rst │ │ ├── bootstrap/ │ │ │ └── bootstrap.rst │ │ ├── clusters/ │ │ │ └── clusters.rst │ │ ├── common_messages/ │ │ │ └── common_messages.rst │ │ ├── config/ │ │ │ ├── accesslog/ │ │ │ │ └── accesslog.rst │ │ │ ├── cluster/ │ │ │ │ └── 
cluster.rst │ │ │ ├── common/ │ │ │ │ └── common.rst │ │ │ ├── compression/ │ │ │ │ └── compression.rst │ │ │ ├── config.rst │ │ │ ├── endpoint/ │ │ │ │ └── endpoint.rst │ │ │ ├── filter/ │ │ │ │ ├── dubbo/ │ │ │ │ │ └── dubbo.rst │ │ │ │ ├── filter.rst │ │ │ │ ├── http/ │ │ │ │ │ └── http.rst │ │ │ │ ├── listener/ │ │ │ │ │ └── listener.rst │ │ │ │ ├── network/ │ │ │ │ │ └── network.rst │ │ │ │ ├── thrift/ │ │ │ │ │ └── thrift.rst │ │ │ │ └── udp/ │ │ │ │ └── udp.rst │ │ │ ├── grpc_credential/ │ │ │ │ └── grpc_credential.rst │ │ │ ├── health_checker/ │ │ │ │ └── health_checker.rst │ │ │ ├── internal_redirect/ │ │ │ │ └── internal_redirect.rst │ │ │ ├── rbac/ │ │ │ │ └── rbac.rst │ │ │ ├── resource_monitor/ │ │ │ │ └── resource_monitor.rst │ │ │ ├── retry/ │ │ │ │ └── retry.rst │ │ │ ├── trace/ │ │ │ │ └── trace.rst │ │ │ ├── transport_socket/ │ │ │ │ └── transport_socket.rst │ │ │ ├── upstream/ │ │ │ │ └── upstream.rst │ │ │ ├── wasm/ │ │ │ │ └── wasm.rst │ │ │ └── watchdog/ │ │ │ └── watchdog.rst │ │ ├── data/ │ │ │ ├── accesslog/ │ │ │ │ └── accesslog.rst │ │ │ ├── cluster/ │ │ │ │ └── cluster.rst │ │ │ ├── core/ │ │ │ │ └── core.rst │ │ │ ├── data.rst │ │ │ ├── dns/ │ │ │ │ └── dns.rst │ │ │ └── tap/ │ │ │ └── tap.rst │ │ ├── http_routes/ │ │ │ └── http_routes.rst │ │ ├── listeners/ │ │ │ └── listeners.rst │ │ ├── service/ │ │ │ └── service.rst │ │ └── types/ │ │ └── types.rst │ ├── configuration/ │ │ ├── advanced/ │ │ │ ├── advanced.rst │ │ │ └── well_known_dynamic_metadata.rst │ │ ├── best_practices/ │ │ │ ├── _include/ │ │ │ │ └── edge.yaml │ │ │ ├── best_practices.rst │ │ │ ├── edge.rst │ │ │ └── level_two.rst │ │ ├── configuration.rst │ │ ├── http/ │ │ │ ├── http.rst │ │ │ ├── http_conn_man/ │ │ │ │ ├── header_casing.rst │ │ │ │ ├── header_sanitizing.rst │ │ │ │ ├── headers.rst │ │ │ │ ├── http_conn_man.rst │ │ │ │ ├── local_reply.rst │ │ │ │ ├── overview.rst │ │ │ │ ├── rds.rst │ │ │ │ ├── response_code_details.rst │ │ │ │ ├── route_matching.rst │ │ │ │ 
├── runtime.rst │ │ │ │ ├── stats.rst │ │ │ │ ├── traffic_splitting.rst │ │ │ │ └── vhds.rst │ │ │ └── http_filters/ │ │ │ ├── _include/ │ │ │ │ ├── dns-cache-circuit-breaker.yaml │ │ │ │ ├── grpc-reverse-bridge-filter.yaml │ │ │ │ └── grpc-transcoder-filter.yaml │ │ │ ├── adaptive_concurrency_filter.rst │ │ │ ├── admission_control_filter.rst │ │ │ ├── aws_lambda_filter.rst │ │ │ ├── aws_request_signing_filter.rst │ │ │ ├── buffer_filter.rst │ │ │ ├── cdn_loop_filter.rst │ │ │ ├── compressor_filter.rst │ │ │ ├── cors_filter.rst │ │ │ ├── csrf_filter.rst │ │ │ ├── decompressor_filter.rst │ │ │ ├── dynamic_forward_proxy_filter.rst │ │ │ ├── dynamodb_filter.rst │ │ │ ├── ext_authz_filter.rst │ │ │ ├── fault_filter.rst │ │ │ ├── grpc_http1_bridge_filter.rst │ │ │ ├── grpc_http1_reverse_bridge_filter.rst │ │ │ ├── grpc_json_transcoder_filter.rst │ │ │ ├── grpc_stats_filter.rst │ │ │ ├── grpc_web_filter.rst │ │ │ ├── gzip_filter.rst │ │ │ ├── header_to_metadata_filter.rst │ │ │ ├── health_check_filter.rst │ │ │ ├── http_filters.rst │ │ │ ├── ip_tagging_filter.rst │ │ │ ├── jwt_authn_filter.rst │ │ │ ├── local_rate_limit_filter.rst │ │ │ ├── lua_filter.rst │ │ │ ├── oauth2_filter.rst │ │ │ ├── on_demand_updates_filter.rst │ │ │ ├── original_src_filter.rst │ │ │ ├── rate_limit_filter.rst │ │ │ ├── rbac_filter.rst │ │ │ ├── router_filter.rst │ │ │ ├── squash_filter.rst │ │ │ ├── tap_filter.rst │ │ │ └── wasm_filter.rst │ │ ├── listeners/ │ │ │ ├── lds.rst │ │ │ ├── listener_filters/ │ │ │ │ ├── http_inspector.rst │ │ │ │ ├── listener_filters.rst │ │ │ │ ├── original_dst_filter.rst │ │ │ │ ├── original_src_filter.rst │ │ │ │ ├── proxy_protocol.rst │ │ │ │ └── tls_inspector.rst │ │ │ ├── listeners.rst │ │ │ ├── network_filters/ │ │ │ │ ├── _include/ │ │ │ │ │ └── sni-dynamic-forward-proxy-filter.yaml │ │ │ │ ├── client_ssl_auth_filter.rst │ │ │ │ ├── direct_response_filter.rst │ │ │ │ ├── dubbo_proxy_filter.rst │ │ │ │ ├── echo_filter.rst │ │ │ │ ├── ext_authz_filter.rst │ │ 
│ │ ├── kafka_broker_filter.rst │ │ │ │ ├── local_rate_limit_filter.rst │ │ │ │ ├── mongo_proxy_filter.rst │ │ │ │ ├── mysql_proxy_filter.rst │ │ │ │ ├── network_filters.rst │ │ │ │ ├── postgres_proxy_filter.rst │ │ │ │ ├── rate_limit_filter.rst │ │ │ │ ├── rbac_filter.rst │ │ │ │ ├── redis_proxy_filter.rst │ │ │ │ ├── rocketmq_proxy_filter.rst │ │ │ │ ├── sni_cluster_filter.rst │ │ │ │ ├── sni_dynamic_forward_proxy_filter.rst │ │ │ │ ├── tcp_proxy_filter.rst │ │ │ │ ├── thrift_proxy_filter.rst │ │ │ │ ├── wasm_filter.rst │ │ │ │ └── zookeeper_proxy_filter.rst │ │ │ ├── overview.rst │ │ │ ├── runtime.rst │ │ │ ├── stats.rst │ │ │ └── udp_filters/ │ │ │ ├── _include/ │ │ │ │ └── udp-proxy.yaml │ │ │ ├── dns_filter.rst │ │ │ ├── udp_filters.rst │ │ │ └── udp_proxy.rst │ │ ├── observability/ │ │ │ ├── access_log/ │ │ │ │ ├── access_log.rst │ │ │ │ ├── overview.rst │ │ │ │ ├── stats.rst │ │ │ │ └── usage.rst │ │ │ ├── application_logging.rst │ │ │ ├── observability.rst │ │ │ └── statistics.rst │ │ ├── operations/ │ │ │ ├── operations.rst │ │ │ ├── overload_manager/ │ │ │ │ └── overload_manager.rst │ │ │ ├── runtime.rst │ │ │ └── tools/ │ │ │ └── router_check.rst │ │ ├── other_features/ │ │ │ ├── other_features.rst │ │ │ ├── rate_limit.rst │ │ │ ├── wasm.rst │ │ │ └── wasm_stat_sink.rst │ │ ├── other_protocols/ │ │ │ ├── dubbo_filters/ │ │ │ │ ├── dubbo_filters.rst │ │ │ │ └── router_filter.rst │ │ │ ├── other_protocols.rst │ │ │ └── thrift_filters/ │ │ │ ├── rate_limit_filter.rst │ │ │ ├── router_filter.rst │ │ │ └── thrift_filters.rst │ │ ├── overview/ │ │ │ ├── bootstrap.rst │ │ │ ├── examples.rst │ │ │ ├── extension.rst │ │ │ ├── introduction.rst │ │ │ ├── mgmt_server.rst │ │ │ ├── overview.rst │ │ │ ├── versioning.rst │ │ │ └── xds_api.rst │ │ ├── security/ │ │ │ ├── secret.rst │ │ │ └── security.rst │ │ └── upstream/ │ │ ├── cluster_manager/ │ │ │ ├── cds.rst │ │ │ ├── cluster_circuit_breakers.rst │ │ │ ├── cluster_hc.rst │ │ │ ├── cluster_manager.rst │ │ │ ├── 
cluster_runtime.rst │ │ │ ├── cluster_stats.rst │ │ │ └── overview.rst │ │ ├── health_checkers/ │ │ │ ├── health_checkers.rst │ │ │ └── redis.rst │ │ └── upstream.rst │ ├── extending/ │ │ └── extending.rst │ ├── faq/ │ │ ├── api/ │ │ │ ├── control_plane.rst │ │ │ ├── control_plane_version_support.rst │ │ │ ├── envoy_upgrade_v3.rst │ │ │ ├── envoy_v2_support.rst │ │ │ ├── envoy_v3.rst │ │ │ ├── extensions.rst │ │ │ ├── incremental.rst │ │ │ ├── package_naming.rst │ │ │ └── why_versioning.rst │ │ ├── build/ │ │ │ ├── binaries.rst │ │ │ └── boringssl.rst │ │ ├── configuration/ │ │ │ ├── deprecation.rst │ │ │ ├── edge.rst │ │ │ ├── flow_control.rst │ │ │ ├── level_two.rst │ │ │ ├── resource_limits.rst │ │ │ ├── sni.rst │ │ │ ├── timeouts.rst │ │ │ ├── zipkin_tracing.rst │ │ │ └── zone_aware_routing.rst │ │ ├── debugging/ │ │ │ ├── why_is_envoy_404ing_connect_requests.rst │ │ │ ├── why_is_envoy_sending_413s.rst │ │ │ ├── why_is_envoy_sending_http2_resets.rst │ │ │ ├── why_is_envoy_sending_internal_responses.rst │ │ │ └── why_is_my_route_not_found.rst │ │ ├── extensions/ │ │ │ └── contract.rst │ │ ├── load_balancing/ │ │ │ ├── concurrency_lb.rst │ │ │ ├── disable_circuit_breaking.rst │ │ │ ├── lb_panic_threshold.rst │ │ │ ├── region_failover.rst │ │ │ └── transient_failures.rst │ │ ├── overview.rst │ │ └── performance/ │ │ ├── how_fast_is_envoy.rst │ │ └── how_to_benchmark_envoy.rst │ ├── index.rst │ ├── install/ │ │ ├── building.rst │ │ ├── install.rst │ │ ├── ref_configs.rst │ │ ├── sandboxes/ │ │ │ └── local_docker_build.rst │ │ └── tools/ │ │ ├── config_load_check_tool.rst │ │ ├── route_table_check_tool.rst │ │ ├── schema_validator_check_tool.rst │ │ └── tools.rst │ ├── intro/ │ │ ├── _include/ │ │ │ └── life-of-a-request.yaml │ │ ├── arch_overview/ │ │ │ ├── advanced/ │ │ │ │ ├── advanced.rst │ │ │ │ └── data_sharing_between_filters.rst │ │ │ ├── arch_overview.rst │ │ │ ├── http/ │ │ │ │ ├── http.rst │ │ │ │ ├── http_connection_management.rst │ │ │ │ ├── 
http_filters.rst │ │ │ │ ├── http_proxy.rst │ │ │ │ ├── http_routing.rst │ │ │ │ └── upgrades.rst │ │ │ ├── intro/ │ │ │ │ ├── intro.rst │ │ │ │ ├── terminology.rst │ │ │ │ └── threading_model.rst │ │ │ ├── listeners/ │ │ │ │ ├── dns_filter.rst │ │ │ │ ├── listener_filters.rst │ │ │ │ ├── listeners.rst │ │ │ │ ├── listeners_toc.rst │ │ │ │ ├── network_filters.rst │ │ │ │ ├── tcp_proxy.rst │ │ │ │ └── udp_proxy.rst │ │ │ ├── observability/ │ │ │ │ ├── access_logging.rst │ │ │ │ ├── observability.rst │ │ │ │ ├── statistics.rst │ │ │ │ └── tracing.rst │ │ │ ├── operations/ │ │ │ │ ├── draining.rst │ │ │ │ ├── dynamic_configuration.rst │ │ │ │ ├── hot_restart.rst │ │ │ │ ├── init.rst │ │ │ │ ├── operations.rst │ │ │ │ ├── overload_manager.rst │ │ │ │ └── runtime.rst │ │ │ ├── other_features/ │ │ │ │ ├── compression/ │ │ │ │ │ └── libraries.rst │ │ │ │ ├── global_rate_limiting.rst │ │ │ │ ├── ip_transparency.rst │ │ │ │ ├── local_rate_limiting.rst │ │ │ │ ├── other_features.rst │ │ │ │ └── scripting.rst │ │ │ ├── other_protocols/ │ │ │ │ ├── dynamo.rst │ │ │ │ ├── grpc.rst │ │ │ │ ├── mongo.rst │ │ │ │ ├── other_protocols.rst │ │ │ │ ├── postgres.rst │ │ │ │ └── redis.rst │ │ │ ├── security/ │ │ │ │ ├── _include/ │ │ │ │ │ └── ssl.yaml │ │ │ │ ├── ext_authz_filter.rst │ │ │ │ ├── external_deps.rst │ │ │ │ ├── google_vrp.rst │ │ │ │ ├── jwt_authn_filter.rst │ │ │ │ ├── rbac_filter.rst │ │ │ │ ├── security.rst │ │ │ │ ├── ssl.rst │ │ │ │ └── threat_model.rst │ │ │ └── upstream/ │ │ │ ├── aggregate_cluster.rst │ │ │ ├── circuit_breaking.rst │ │ │ ├── cluster_manager.rst │ │ │ ├── connection_pooling.rst │ │ │ ├── health_checking.rst │ │ │ ├── load_balancing/ │ │ │ │ ├── degraded.rst │ │ │ │ ├── load_balancers.rst │ │ │ │ ├── load_balancing.rst │ │ │ │ ├── locality_weight.rst │ │ │ │ ├── original_dst.rst │ │ │ │ ├── overprovisioning.rst │ │ │ │ ├── overview.rst │ │ │ │ ├── panic_threshold.rst │ │ │ │ ├── priority.rst │ │ │ │ ├── subsets.rst │ │ │ │ └── zone_aware.rst │ │ │ 
├── load_reporting_service.rst │ │ │ ├── outlier.rst │ │ │ ├── service_discovery.rst │ │ │ ├── upstream.rst │ │ │ └── upstream_filters.rst │ │ ├── deployment_types/ │ │ │ ├── deployment_types.rst │ │ │ ├── double_proxy.rst │ │ │ ├── front_proxy.rst │ │ │ └── service_to_service.rst │ │ ├── deprecated.rst │ │ ├── getting_help.rst │ │ ├── intro.rst │ │ ├── life_of_a_request.rst │ │ ├── version_history.rst │ │ └── what_is_envoy.rst │ ├── operations/ │ │ ├── admin.rst │ │ ├── certificates.rst │ │ ├── cli.rst │ │ ├── fs_flags.rst │ │ ├── hot_restarter.rst │ │ ├── operations.rst │ │ ├── performance.rst │ │ ├── runtime.rst │ │ ├── stats_overview.rst │ │ └── traffic_tapping.rst │ ├── start/ │ │ ├── sandboxes/ │ │ │ ├── _include/ │ │ │ │ └── docker-env-setup.rst │ │ │ ├── cache.rst │ │ │ ├── cors.rst │ │ │ ├── csrf.rst │ │ │ ├── ext_authz.rst │ │ │ ├── fault_injection.rst │ │ │ ├── front_proxy.rst │ │ │ ├── grpc_bridge.rst │ │ │ ├── jaeger_native_tracing.rst │ │ │ ├── jaeger_tracing.rst │ │ │ ├── load_reporting_service.rst │ │ │ ├── lua.rst │ │ │ ├── mysql.rst │ │ │ ├── redis.rst │ │ │ └── zipkin_tracing.rst │ │ └── start.rst │ └── version_history/ │ ├── current.rst │ ├── v1.0.0.rst │ ├── v1.1.0.rst │ ├── v1.10.0.rst │ ├── v1.11.0.rst │ ├── v1.11.1.rst │ ├── v1.11.2.rst │ ├── v1.12.0.rst │ ├── v1.12.1.rst │ ├── v1.12.2.rst │ ├── v1.12.3.rst │ ├── v1.12.4.rst │ ├── v1.12.5.rst │ ├── v1.12.6.rst │ ├── v1.12.7.rst │ ├── v1.13.0.rst │ ├── v1.13.1.rst │ ├── v1.13.2.rst │ ├── v1.13.3.rst │ ├── v1.13.4.rst │ ├── v1.13.5.rst │ ├── v1.13.6.rst │ ├── v1.14.0.rst │ ├── v1.14.1.rst │ ├── v1.14.2.rst │ ├── v1.14.3.rst │ ├── v1.14.4.rst │ ├── v1.14.5.rst │ ├── v1.15.0.rst │ ├── v1.15.1.rst │ ├── v1.15.2.rst │ ├── v1.2.0.rst │ ├── v1.3.0.rst │ ├── v1.4.0.rst │ ├── v1.5.0.rst │ ├── v1.6.0.rst │ ├── v1.7.0.rst │ ├── v1.8.0.rst │ ├── v1.9.0.rst │ ├── v1.9.1.rst │ └── version_history.rst ├── examples/ │ ├── BUILD │ ├── DEVELOPER.md │ ├── cache/ │ │ ├── Dockerfile-frontenvoy │ │ ├── 
Dockerfile-service │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── front-envoy.yaml │ │ ├── responses.yaml │ │ ├── service-envoy.yaml │ │ ├── service.py │ │ ├── start_service.sh │ │ └── verify.sh │ ├── cors/ │ │ ├── README.md │ │ ├── backend/ │ │ │ ├── Dockerfile-frontenvoy │ │ │ ├── Dockerfile-service │ │ │ ├── docker-compose.yaml │ │ │ ├── front-envoy.yaml │ │ │ ├── service-envoy.yaml │ │ │ ├── service.py │ │ │ └── start_service.sh │ │ ├── frontend/ │ │ │ ├── Dockerfile-frontenvoy │ │ │ ├── Dockerfile-service │ │ │ ├── docker-compose.yaml │ │ │ ├── front-envoy.yaml │ │ │ ├── index.html │ │ │ ├── service-envoy.yaml │ │ │ ├── service.py │ │ │ └── start_service.sh │ │ └── verify.sh │ ├── csrf/ │ │ ├── README.md │ │ ├── crosssite/ │ │ │ ├── Dockerfile-frontenvoy │ │ │ ├── Dockerfile-service │ │ │ ├── docker-compose.yml │ │ │ ├── front-envoy.yaml │ │ │ └── service.py │ │ ├── index.html │ │ ├── samesite/ │ │ │ ├── Dockerfile-frontenvoy │ │ │ ├── Dockerfile-service │ │ │ ├── docker-compose.yml │ │ │ ├── front-envoy.yaml │ │ │ └── service.py │ │ ├── service-envoy.yaml │ │ ├── start_service.sh │ │ └── verify.sh │ ├── ext_authz/ │ │ ├── Dockerfile-frontenvoy │ │ ├── README.md │ │ ├── auth/ │ │ │ ├── grpc-service/ │ │ │ │ ├── Dockerfile │ │ │ │ ├── Makefile │ │ │ │ ├── go.mod │ │ │ │ ├── go.sum │ │ │ │ ├── main.go │ │ │ │ └── pkg/ │ │ │ │ └── auth/ │ │ │ │ ├── users.go │ │ │ │ ├── v2/ │ │ │ │ │ └── auth.go │ │ │ │ └── v3/ │ │ │ │ └── auth.go │ │ │ ├── http-service/ │ │ │ │ ├── Dockerfile │ │ │ │ └── server.js │ │ │ └── users.json │ │ ├── config/ │ │ │ ├── grpc-service/ │ │ │ │ ├── v2.yaml │ │ │ │ └── v3.yaml │ │ │ ├── http-service.yaml │ │ │ └── opa-service/ │ │ │ ├── policy.rego │ │ │ └── v2.yaml │ │ ├── docker-compose.yaml │ │ ├── run_envoy.sh │ │ ├── upstream/ │ │ │ └── service/ │ │ │ ├── Dockerfile │ │ │ └── server.py │ │ ├── users.json │ │ └── verify.sh │ ├── fault-injection/ │ │ ├── .gitignore │ │ ├── Dockerfile-envoy │ │ ├── README.md │ │ ├── 
disable_abort_fault_injection.sh │ │ ├── disable_delay_fault_injection.sh │ │ ├── docker-compose.yaml │ │ ├── enable_abort_fault_injection.sh │ │ ├── enable_delay_fault_injection.sh │ │ ├── envoy.yaml │ │ ├── send_request.sh │ │ └── verify.sh │ ├── front-proxy/ │ │ ├── Dockerfile-frontenvoy │ │ ├── Dockerfile-jaeger-service │ │ ├── Dockerfile-service │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── front-envoy.yaml │ │ ├── service-envoy.yaml │ │ ├── service.py │ │ ├── start_service.sh │ │ └── verify.sh │ ├── grpc-bridge/ │ │ ├── .gitignore │ │ ├── Dockerfile-client │ │ ├── Dockerfile-server │ │ ├── README.md │ │ ├── client/ │ │ │ ├── Dockerfile │ │ │ ├── client.py │ │ │ ├── envoy-proxy.yaml │ │ │ ├── kv/ │ │ │ │ └── __init__.py │ │ │ └── requirements.txt │ │ ├── docker-compose-protos.yaml │ │ ├── docker-compose.yaml │ │ ├── protos/ │ │ │ └── kv.proto │ │ ├── server/ │ │ │ ├── Dockerfile │ │ │ ├── envoy-proxy.yaml │ │ │ ├── go.mod │ │ │ ├── go.sum │ │ │ ├── kv/ │ │ │ │ └── go.mod │ │ │ └── service.go │ │ └── verify.sh │ ├── jaeger-native-tracing/ │ │ ├── Dockerfile-frontenvoy │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── front-envoy-jaeger.yaml │ │ ├── install-jaeger-plugin.sh │ │ ├── service1-envoy-jaeger.yaml │ │ ├── service2-envoy-jaeger.yaml │ │ └── verify.sh │ ├── jaeger-tracing/ │ │ ├── Dockerfile-frontenvoy │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── front-envoy-jaeger.yaml │ │ ├── service1-envoy-jaeger.yaml │ │ ├── service2-envoy-jaeger.yaml │ │ └── verify.sh │ ├── load-reporting-service/ │ │ ├── Dockerfile-http-server │ │ ├── Dockerfile-lrs │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── go.mod │ │ ├── go.sum │ │ ├── http_server.py │ │ ├── main.go │ │ ├── send_requests.sh │ │ ├── server/ │ │ │ └── lrs_server.go │ │ ├── service-envoy-w-lrs.yaml │ │ ├── start_service.sh │ │ └── verify.sh │ ├── lua/ │ │ ├── Dockerfile-proxy │ │ ├── Dockerfile-web-service │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── envoy.yaml │ │ ├── lib/ 
│ │ │ └── mylibrary.lua │ │ └── verify.sh │ ├── mysql/ │ │ ├── Dockerfile-mysql │ │ ├── Dockerfile-proxy │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── envoy.yaml │ │ └── verify.sh │ ├── redis/ │ │ ├── Dockerfile-proxy │ │ ├── Dockerfile-redis │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── envoy.yaml │ │ └── verify.sh │ ├── verify-common.sh │ ├── wasm/ │ │ ├── BUILD │ │ ├── Dockerfile-proxy │ │ ├── Dockerfile-web-service │ │ ├── README.md │ │ ├── docker-compose.yaml │ │ ├── envoy.yaml │ │ ├── envoy_filter_http_wasm_example.cc │ │ └── verify.sh │ └── zipkin-tracing/ │ ├── Dockerfile-frontenvoy │ ├── README.md │ ├── docker-compose.yaml │ ├── front-envoy-zipkin.yaml │ ├── service1-envoy-zipkin.yaml │ ├── service2-envoy-zipkin.yaml │ └── verify.sh ├── generated_api_shadow/ │ ├── BUILD │ ├── README.md │ ├── bazel/ │ │ ├── BUILD │ │ ├── api_build_system.bzl │ │ ├── envoy_http_archive.bzl │ │ ├── external_proto_deps.bzl │ │ ├── repositories.bzl │ │ └── repository_locations.bzl │ └── envoy/ │ ├── admin/ │ │ ├── v2alpha/ │ │ │ ├── BUILD │ │ │ ├── certs.proto │ │ │ ├── clusters.proto │ │ │ ├── config_dump.proto │ │ │ ├── listeners.proto │ │ │ ├── memory.proto │ │ │ ├── metrics.proto │ │ │ ├── mutex_stats.proto │ │ │ ├── server_info.proto │ │ │ └── tap.proto │ │ ├── v3/ │ │ │ ├── BUILD │ │ │ ├── certs.proto │ │ │ ├── clusters.proto │ │ │ ├── config_dump.proto │ │ │ ├── init_dump.proto │ │ │ ├── listeners.proto │ │ │ ├── memory.proto │ │ │ ├── metrics.proto │ │ │ ├── mutex_stats.proto │ │ │ ├── server_info.proto │ │ │ └── tap.proto │ │ └── v4alpha/ │ │ ├── BUILD │ │ ├── certs.proto │ │ ├── clusters.proto │ │ ├── config_dump.proto │ │ ├── init_dump.proto │ │ ├── listeners.proto │ │ ├── memory.proto │ │ ├── metrics.proto │ │ ├── mutex_stats.proto │ │ ├── server_info.proto │ │ └── tap.proto │ ├── annotations/ │ │ ├── BUILD │ │ ├── deprecation.proto │ │ └── resource.proto │ ├── api/ │ │ └── v2/ │ │ ├── BUILD │ │ ├── README.md │ │ ├── auth/ │ │ │ ├── BUILD │ │ │ ├── 
cert.proto │ │ │ ├── common.proto │ │ │ ├── secret.proto │ │ │ └── tls.proto │ │ ├── cds.proto │ │ ├── cluster/ │ │ │ ├── BUILD │ │ │ ├── circuit_breaker.proto │ │ │ ├── filter.proto │ │ │ └── outlier_detection.proto │ │ ├── cluster.proto │ │ ├── core/ │ │ │ ├── BUILD │ │ │ ├── address.proto │ │ │ ├── backoff.proto │ │ │ ├── base.proto │ │ │ ├── config_source.proto │ │ │ ├── event_service_config.proto │ │ │ ├── grpc_method_list.proto │ │ │ ├── grpc_service.proto │ │ │ ├── health_check.proto │ │ │ ├── http_uri.proto │ │ │ ├── protocol.proto │ │ │ └── socket_option.proto │ │ ├── discovery.proto │ │ ├── eds.proto │ │ ├── endpoint/ │ │ │ ├── BUILD │ │ │ ├── endpoint.proto │ │ │ ├── endpoint_components.proto │ │ │ └── load_report.proto │ │ ├── endpoint.proto │ │ ├── lds.proto │ │ ├── listener/ │ │ │ ├── BUILD │ │ │ ├── listener.proto │ │ │ ├── listener_components.proto │ │ │ ├── quic_config.proto │ │ │ └── udp_listener_config.proto │ │ ├── listener.proto │ │ ├── ratelimit/ │ │ │ ├── BUILD │ │ │ └── ratelimit.proto │ │ ├── rds.proto │ │ ├── route/ │ │ │ ├── BUILD │ │ │ ├── route.proto │ │ │ └── route_components.proto │ │ ├── route.proto │ │ ├── scoped_route.proto │ │ └── srds.proto │ ├── config/ │ │ ├── README.md │ │ ├── accesslog/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ ├── als.proto │ │ │ │ └── file.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── accesslog.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── accesslog.proto │ │ ├── bootstrap/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── bootstrap.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── bootstrap.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── bootstrap.proto │ │ ├── cluster/ │ │ │ ├── aggregate/ │ │ │ │ └── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── cluster.proto │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ └── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── cluster.proto │ │ │ ├── redis/ │ │ │ │ ├── BUILD │ │ │ │ └── redis_cluster.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── circuit_breaker.proto │ │ │ │ ├── 
cluster.proto │ │ │ │ ├── filter.proto │ │ │ │ └── outlier_detection.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── circuit_breaker.proto │ │ │ ├── cluster.proto │ │ │ ├── filter.proto │ │ │ └── outlier_detection.proto │ │ ├── common/ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ └── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── dns_cache.proto │ │ │ ├── matcher/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── matcher.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── matcher.proto │ │ │ └── tap/ │ │ │ └── v2alpha/ │ │ │ ├── BUILD │ │ │ └── common.proto │ │ ├── core/ │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── address.proto │ │ │ │ ├── backoff.proto │ │ │ │ ├── base.proto │ │ │ │ ├── config_source.proto │ │ │ │ ├── event_service_config.proto │ │ │ │ ├── extension.proto │ │ │ │ ├── grpc_method_list.proto │ │ │ │ ├── grpc_service.proto │ │ │ │ ├── health_check.proto │ │ │ │ ├── http_uri.proto │ │ │ │ ├── protocol.proto │ │ │ │ ├── proxy_protocol.proto │ │ │ │ ├── socket_option.proto │ │ │ │ └── substitution_format_string.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── address.proto │ │ │ ├── backoff.proto │ │ │ ├── base.proto │ │ │ ├── config_source.proto │ │ │ ├── event_service_config.proto │ │ │ ├── extension.proto │ │ │ ├── grpc_method_list.proto │ │ │ ├── grpc_service.proto │ │ │ ├── health_check.proto │ │ │ ├── http_uri.proto │ │ │ ├── protocol.proto │ │ │ ├── proxy_protocol.proto │ │ │ ├── socket_option.proto │ │ │ └── substitution_format_string.proto │ │ ├── endpoint/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ ├── endpoint.proto │ │ │ ├── endpoint_components.proto │ │ │ └── load_report.proto │ │ ├── filter/ │ │ │ ├── README.md │ │ │ ├── accesslog/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── accesslog.proto │ │ │ ├── dubbo/ │ │ │ │ └── router/ │ │ │ │ └── v2alpha1/ │ │ │ │ ├── BUILD │ │ │ │ └── router.proto │ │ │ ├── fault/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── fault.proto │ │ │ ├── http/ │ │ │ │ ├── adaptive_concurrency/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ 
├── BUILD │ │ │ │ │ └── adaptive_concurrency.proto │ │ │ │ ├── aws_lambda/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── aws_lambda.proto │ │ │ │ ├── aws_request_signing/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── aws_request_signing.proto │ │ │ │ ├── buffer/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── buffer.proto │ │ │ │ ├── cache/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── cache.proto │ │ │ │ ├── compressor/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── compressor.proto │ │ │ │ ├── cors/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── cors.proto │ │ │ │ ├── csrf/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── csrf.proto │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dynamic_forward_proxy.proto │ │ │ │ ├── dynamo/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dynamo.proto │ │ │ │ ├── ext_authz/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── ext_authz.proto │ │ │ │ ├── fault/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── fault.proto │ │ │ │ ├── grpc_http1_bridge/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config.proto │ │ │ │ ├── grpc_http1_reverse_bridge/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config.proto │ │ │ │ ├── grpc_stats/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config.proto │ │ │ │ ├── grpc_web/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── grpc_web.proto │ │ │ │ ├── gzip/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── gzip.proto │ │ │ │ ├── header_to_metadata/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── header_to_metadata.proto │ │ │ │ ├── health_check/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── health_check.proto │ │ │ │ ├── ip_tagging/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── ip_tagging.proto │ │ │ │ ├── jwt_authn/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── README.md │ │ │ │ │ └── config.proto │ │ │ │ ├── lua/ │ │ │ │ │ └── v2/ │ │ │ 
│ │ ├── BUILD │ │ │ │ │ └── lua.proto │ │ │ │ ├── on_demand/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── on_demand.proto │ │ │ │ ├── original_src/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── original_src.proto │ │ │ │ ├── rate_limit/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rate_limit.proto │ │ │ │ ├── rbac/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rbac.proto │ │ │ │ ├── router/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── router.proto │ │ │ │ ├── squash/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── squash.proto │ │ │ │ ├── tap/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tap.proto │ │ │ │ └── transcoder/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── transcoder.proto │ │ │ ├── listener/ │ │ │ │ ├── http_inspector/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── http_inspector.proto │ │ │ │ ├── original_dst/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── original_dst.proto │ │ │ │ ├── original_src/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── original_src.proto │ │ │ │ ├── proxy_protocol/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── proxy_protocol.proto │ │ │ │ └── tls_inspector/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── tls_inspector.proto │ │ │ ├── network/ │ │ │ │ ├── client_ssl_auth/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── client_ssl_auth.proto │ │ │ │ ├── direct_response/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config.proto │ │ │ │ ├── dubbo_proxy/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── README.md │ │ │ │ │ ├── dubbo_proxy.proto │ │ │ │ │ └── route.proto │ │ │ │ ├── echo/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── echo.proto │ │ │ │ ├── ext_authz/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── ext_authz.proto │ │ │ │ ├── http_connection_manager/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── http_connection_manager.proto │ │ │ │ ├── kafka_broker/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ 
└── kafka_broker.proto │ │ │ │ ├── local_rate_limit/ │ │ │ │ │ └── v2alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── local_rate_limit.proto │ │ │ │ ├── mongo_proxy/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── mongo_proxy.proto │ │ │ │ ├── mysql_proxy/ │ │ │ │ │ └── v1alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── mysql_proxy.proto │ │ │ │ ├── rate_limit/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rate_limit.proto │ │ │ │ ├── rbac/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rbac.proto │ │ │ │ ├── redis_proxy/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── redis_proxy.proto │ │ │ │ ├── sni_cluster/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── sni_cluster.proto │ │ │ │ ├── tcp_proxy/ │ │ │ │ │ └── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tcp_proxy.proto │ │ │ │ ├── thrift_proxy/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── README.md │ │ │ │ │ ├── route.proto │ │ │ │ │ └── thrift_proxy.proto │ │ │ │ └── zookeeper_proxy/ │ │ │ │ └── v1alpha1/ │ │ │ │ ├── BUILD │ │ │ │ └── zookeeper_proxy.proto │ │ │ ├── thrift/ │ │ │ │ ├── rate_limit/ │ │ │ │ │ └── v2alpha1/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rate_limit.proto │ │ │ │ └── router/ │ │ │ │ └── v2alpha1/ │ │ │ │ ├── BUILD │ │ │ │ └── router.proto │ │ │ └── udp/ │ │ │ └── udp_proxy/ │ │ │ └── v2alpha/ │ │ │ ├── BUILD │ │ │ └── udp_proxy.proto │ │ ├── grpc_credential/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── aws_iam.proto │ │ │ │ └── file_based_metadata.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ ├── aws_iam.proto │ │ │ └── file_based_metadata.proto │ │ ├── health_checker/ │ │ │ └── redis/ │ │ │ └── v2/ │ │ │ ├── BUILD │ │ │ └── redis.proto │ │ ├── listener/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── api_listener.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── api_listener.proto │ │ │ │ ├── listener.proto │ │ │ │ ├── listener_components.proto │ │ │ │ ├── quic_config.proto │ │ │ │ ├── udp_default_writer_config.proto │ │ │ │ ├── udp_gso_batch_writer_config.proto │ │ │ │ └── 
udp_listener_config.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── api_listener.proto │ │ │ ├── listener.proto │ │ │ ├── listener_components.proto │ │ │ ├── quic_config.proto │ │ │ ├── udp_default_writer_config.proto │ │ │ ├── udp_gso_batch_writer_config.proto │ │ │ └── udp_listener_config.proto │ │ ├── metrics/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ ├── metrics_service.proto │ │ │ │ └── stats.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── metrics_service.proto │ │ │ │ └── stats.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── metrics_service.proto │ │ │ └── stats.proto │ │ ├── overload/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── overload.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── overload.proto │ │ ├── ratelimit/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── rls.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── rls.proto │ │ ├── rbac/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── rbac.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── rbac.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── rbac.proto │ │ ├── resource_monitor/ │ │ │ ├── fixed_heap/ │ │ │ │ └── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── fixed_heap.proto │ │ │ └── injected_resource/ │ │ │ └── v2alpha/ │ │ │ ├── BUILD │ │ │ └── injected_resource.proto │ │ ├── retry/ │ │ │ ├── omit_canary_hosts/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── omit_canary_hosts.proto │ │ │ ├── omit_host_metadata/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── omit_host_metadata_config.proto │ │ │ ├── previous_hosts/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── previous_hosts.proto │ │ │ └── previous_priorities/ │ │ │ ├── BUILD │ │ │ └── previous_priorities_config.proto │ │ ├── route/ │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── route.proto │ │ │ │ ├── route_components.proto │ │ │ │ └── scoped_route.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── route.proto │ │ │ ├── route_components.proto │ │ │ └── scoped_route.proto │ │ ├── tap/ │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── common.proto │ │ │ └── v4alpha/ │ │ │ 
├── BUILD │ │ │ └── common.proto │ │ ├── trace/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ ├── datadog.proto │ │ │ │ ├── dynamic_ot.proto │ │ │ │ ├── http_tracer.proto │ │ │ │ ├── lightstep.proto │ │ │ │ ├── opencensus.proto │ │ │ │ ├── service.proto │ │ │ │ ├── trace.proto │ │ │ │ └── zipkin.proto │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── xray.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── datadog.proto │ │ │ │ ├── dynamic_ot.proto │ │ │ │ ├── http_tracer.proto │ │ │ │ ├── lightstep.proto │ │ │ │ ├── opencensus.proto │ │ │ │ ├── service.proto │ │ │ │ ├── trace.proto │ │ │ │ ├── xray.proto │ │ │ │ └── zipkin.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── http_tracer.proto │ │ │ └── service.proto │ │ └── transport_socket/ │ │ ├── alts/ │ │ │ └── v2alpha/ │ │ │ ├── BUILD │ │ │ └── alts.proto │ │ ├── raw_buffer/ │ │ │ └── v2/ │ │ │ ├── BUILD │ │ │ └── raw_buffer.proto │ │ └── tap/ │ │ └── v2alpha/ │ │ ├── BUILD │ │ └── tap.proto │ ├── data/ │ │ ├── accesslog/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── accesslog.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── accesslog.proto │ │ ├── cluster/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── outlier_detection_event.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── outlier_detection_event.proto │ │ ├── core/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── health_check_event.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── health_check_event.proto │ │ ├── dns/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── dns_table.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── dns_table.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── dns_table.proto │ │ └── tap/ │ │ ├── v2alpha/ │ │ │ ├── BUILD │ │ │ ├── common.proto │ │ │ ├── http.proto │ │ │ ├── transport.proto │ │ │ └── wrapper.proto │ │ └── v3/ │ │ ├── BUILD │ │ ├── common.proto │ │ ├── http.proto │ │ ├── transport.proto │ │ └── wrapper.proto │ ├── extensions/ │ │ ├── access_loggers/ │ │ │ ├── file/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── file.proto │ │ │ │ └── 
v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── file.proto │ │ │ ├── grpc/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── als.proto │ │ │ └── wasm/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── wasm.proto │ │ ├── clusters/ │ │ │ ├── aggregate/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── cluster.proto │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── cluster.proto │ │ │ └── redis/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── redis_cluster.proto │ │ ├── common/ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── dns_cache.proto │ │ │ ├── ratelimit/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── ratelimit.proto │ │ │ └── tap/ │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── common.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── common.proto │ │ ├── compression/ │ │ │ └── gzip/ │ │ │ ├── compressor/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── gzip.proto │ │ │ └── decompressor/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── gzip.proto │ │ ├── filters/ │ │ │ ├── common/ │ │ │ │ └── fault/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── fault.proto │ │ │ ├── http/ │ │ │ │ ├── adaptive_concurrency/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── adaptive_concurrency.proto │ │ │ │ ├── admission_control/ │ │ │ │ │ └── v3alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── admission_control.proto │ │ │ │ ├── aws_lambda/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── aws_lambda.proto │ │ │ │ ├── aws_request_signing/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── aws_request_signing.proto │ │ │ │ ├── buffer/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── buffer.proto │ │ │ │ ├── cache/ │ │ │ │ │ ├── v3alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── cache.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── cache.proto │ │ │ │ ├── cdn_loop/ │ │ │ │ │ └── v3alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── cdn_loop.proto │ │ │ │ ├── compressor/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── compressor.proto │ │ │ │ ├── cors/ │ │ │ │ │ └── v3/ 
│ │ │ │ │ ├── BUILD │ │ │ │ │ └── cors.proto │ │ │ │ ├── csrf/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── csrf.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── csrf.proto │ │ │ │ ├── decompressor/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── decompressor.proto │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dynamic_forward_proxy.proto │ │ │ │ ├── dynamo/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dynamo.proto │ │ │ │ ├── ext_authz/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── ext_authz.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── ext_authz.proto │ │ │ │ ├── fault/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── fault.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── fault.proto │ │ │ │ ├── grpc_http1_bridge/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config.proto │ │ │ │ ├── grpc_http1_reverse_bridge/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config.proto │ │ │ │ ├── grpc_json_transcoder/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── transcoder.proto │ │ │ │ ├── grpc_stats/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config.proto │ │ │ │ ├── grpc_web/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── grpc_web.proto │ │ │ │ ├── gzip/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── gzip.proto │ │ │ │ ├── header_to_metadata/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── header_to_metadata.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── header_to_metadata.proto │ │ │ │ ├── health_check/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── health_check.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── health_check.proto │ │ │ │ ├── ip_tagging/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── ip_tagging.proto │ │ │ │ ├── jwt_authn/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── config.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── 
config.proto │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── local_rate_limit.proto │ │ │ │ ├── lua/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── lua.proto │ │ │ │ ├── oauth2/ │ │ │ │ │ ├── v3alpha/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── oauth.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── oauth.proto │ │ │ │ ├── on_demand/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── on_demand.proto │ │ │ │ ├── original_src/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── original_src.proto │ │ │ │ ├── ratelimit/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rate_limit.proto │ │ │ │ ├── rbac/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rbac.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rbac.proto │ │ │ │ ├── router/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── router.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── router.proto │ │ │ │ ├── squash/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── squash.proto │ │ │ │ ├── tap/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── tap.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tap.proto │ │ │ │ └── wasm/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── wasm.proto │ │ │ ├── listener/ │ │ │ │ ├── http_inspector/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── http_inspector.proto │ │ │ │ ├── original_dst/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── original_dst.proto │ │ │ │ ├── original_src/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── original_src.proto │ │ │ │ ├── proxy_protocol/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── proxy_protocol.proto │ │ │ │ └── tls_inspector/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── tls_inspector.proto │ │ │ ├── network/ │ │ │ │ ├── client_ssl_auth/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── client_ssl_auth.proto │ │ │ │ ├── direct_response/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config.proto │ │ │ │ ├── 
dubbo_proxy/ │ │ │ │ │ ├── router/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── router.proto │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── dubbo_proxy.proto │ │ │ │ │ │ └── route.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── dubbo_proxy.proto │ │ │ │ │ └── route.proto │ │ │ │ ├── echo/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── echo.proto │ │ │ │ ├── ext_authz/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── ext_authz.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── ext_authz.proto │ │ │ │ ├── http_connection_manager/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── http_connection_manager.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── http_connection_manager.proto │ │ │ │ ├── kafka_broker/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── kafka_broker.proto │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── local_rate_limit.proto │ │ │ │ ├── mongo_proxy/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── mongo_proxy.proto │ │ │ │ ├── mysql_proxy/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── mysql_proxy.proto │ │ │ │ ├── postgres_proxy/ │ │ │ │ │ └── v3alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── postgres_proxy.proto │ │ │ │ ├── ratelimit/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rate_limit.proto │ │ │ │ ├── rbac/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rbac.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── rbac.proto │ │ │ │ ├── redis_proxy/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── redis_proxy.proto │ │ │ │ ├── rocketmq_proxy/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── rocketmq_proxy.proto │ │ │ │ │ │ └── route.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── rocketmq_proxy.proto │ │ │ │ │ └── route.proto │ │ │ │ ├── sni_cluster/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── sni_cluster.proto │ │ │ │ ├── sni_dynamic_forward_proxy/ │ │ │ │ │ └── v3alpha/ 
│ │ │ │ │ ├── BUILD │ │ │ │ │ └── sni_dynamic_forward_proxy.proto │ │ │ │ ├── tcp_proxy/ │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── tcp_proxy.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tcp_proxy.proto │ │ │ │ ├── thrift_proxy/ │ │ │ │ │ ├── filters/ │ │ │ │ │ │ └── ratelimit/ │ │ │ │ │ │ └── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── rate_limit.proto │ │ │ │ │ ├── v3/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── route.proto │ │ │ │ │ │ └── thrift_proxy.proto │ │ │ │ │ └── v4alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── route.proto │ │ │ │ │ └── thrift_proxy.proto │ │ │ │ ├── wasm/ │ │ │ │ │ └── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── wasm.proto │ │ │ │ └── zookeeper_proxy/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── zookeeper_proxy.proto │ │ │ └── udp/ │ │ │ ├── dns_filter/ │ │ │ │ ├── v3alpha/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── dns_filter.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── dns_filter.proto │ │ │ └── udp_proxy/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── udp_proxy.proto │ │ ├── internal_redirect/ │ │ │ ├── allow_listed_routes/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── allow_listed_routes_config.proto │ │ │ ├── previous_routes/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── previous_routes_config.proto │ │ │ └── safe_cross_scheme/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── safe_cross_scheme_config.proto │ │ ├── network/ │ │ │ └── socket_interface/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── default_socket_interface.proto │ │ ├── retry/ │ │ │ ├── host/ │ │ │ │ └── omit_host_metadata/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── omit_host_metadata_config.proto │ │ │ └── priority/ │ │ │ └── previous_priorities/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── previous_priorities_config.proto │ │ ├── stat_sinks/ │ │ │ └── wasm/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── wasm.proto │ │ ├── tracers/ │ │ │ ├── datadog/ │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── datadog.proto │ │ │ ├── dynamic_ot/ │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ 
└── dynamic_ot.proto │ │ │ ├── lightstep/ │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── lightstep.proto │ │ │ ├── opencensus/ │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── opencensus.proto │ │ │ ├── xray/ │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── xray.proto │ │ │ └── zipkin/ │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── zipkin.proto │ │ ├── transport_sockets/ │ │ │ ├── alts/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── alts.proto │ │ │ ├── proxy_protocol/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── upstream_proxy_protocol.proto │ │ │ ├── quic/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── quic_transport.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── quic_transport.proto │ │ │ ├── raw_buffer/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── raw_buffer.proto │ │ │ ├── tap/ │ │ │ │ ├── v3/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── tap.proto │ │ │ │ └── v4alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── tap.proto │ │ │ └── tls/ │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── cert.proto │ │ │ │ ├── common.proto │ │ │ │ ├── secret.proto │ │ │ │ └── tls.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── common.proto │ │ │ ├── secret.proto │ │ │ └── tls.proto │ │ ├── upstreams/ │ │ │ └── http/ │ │ │ ├── generic/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── generic_connection_pool.proto │ │ │ ├── http/ │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── http_connection_pool.proto │ │ │ └── tcp/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── tcp_connection_pool.proto │ │ ├── wasm/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── wasm.proto │ │ └── watchdog/ │ │ ├── abort_action/ │ │ │ └── v3alpha/ │ │ │ ├── BUILD │ │ │ └── abort_action.proto │ │ └── profile_action/ │ │ └── v3alpha/ │ │ ├── BUILD │ │ └── profile_action.proto │ ├── service/ │ │ ├── README.md │ │ ├── accesslog/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── als.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── als.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── als.proto │ │ ├── auth/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ 
├── attribute_context.proto │ │ │ │ └── external_auth.proto │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── external_auth.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── attribute_context.proto │ │ │ │ └── external_auth.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── attribute_context.proto │ │ │ └── external_auth.proto │ │ ├── cluster/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── cds.proto │ │ ├── discovery/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ ├── ads.proto │ │ │ │ ├── hds.proto │ │ │ │ ├── rtds.proto │ │ │ │ └── sds.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── ads.proto │ │ │ │ └── discovery.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── ads.proto │ │ │ └── discovery.proto │ │ ├── endpoint/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── eds.proto │ │ ├── event_reporting/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ └── event_reporting_service.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── event_reporting_service.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── event_reporting_service.proto │ │ ├── extension/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── config_discovery.proto │ │ ├── health/ │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── hds.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── hds.proto │ │ ├── listener/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── lds.proto │ │ ├── load_stats/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── lrs.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── lrs.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── lrs.proto │ │ ├── metrics/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── metrics_service.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── metrics_service.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── metrics_service.proto │ │ ├── ratelimit/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── rls.proto │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── rls.proto │ │ ├── route/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ ├── rds.proto │ │ │ └── srds.proto │ │ ├── runtime/ │ │ │ └── v3/ │ │ │ ├── BUILD │ │ │ └── rtds.proto │ │ ├── secret/ │ │ │ └── v3/ 
│ │ │ ├── BUILD │ │ │ └── sds.proto │ │ ├── status/ │ │ │ ├── v2/ │ │ │ │ ├── BUILD │ │ │ │ └── csds.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── csds.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── csds.proto │ │ ├── tap/ │ │ │ ├── v2alpha/ │ │ │ │ ├── BUILD │ │ │ │ ├── common.proto │ │ │ │ └── tap.proto │ │ │ ├── v3/ │ │ │ │ ├── BUILD │ │ │ │ └── tap.proto │ │ │ └── v4alpha/ │ │ │ ├── BUILD │ │ │ └── tap.proto │ │ └── trace/ │ │ ├── v2/ │ │ │ ├── BUILD │ │ │ └── trace_service.proto │ │ ├── v3/ │ │ │ ├── BUILD │ │ │ └── trace_service.proto │ │ └── v4alpha/ │ │ ├── BUILD │ │ └── trace_service.proto │ └── type/ │ ├── BUILD │ ├── hash_policy.proto │ ├── http.proto │ ├── http_status.proto │ ├── matcher/ │ │ ├── BUILD │ │ ├── metadata.proto │ │ ├── node.proto │ │ ├── number.proto │ │ ├── path.proto │ │ ├── regex.proto │ │ ├── string.proto │ │ ├── struct.proto │ │ ├── v3/ │ │ │ ├── BUILD │ │ │ ├── metadata.proto │ │ │ ├── node.proto │ │ │ ├── number.proto │ │ │ ├── path.proto │ │ │ ├── regex.proto │ │ │ ├── string.proto │ │ │ ├── struct.proto │ │ │ └── value.proto │ │ ├── v4alpha/ │ │ │ ├── BUILD │ │ │ ├── metadata.proto │ │ │ ├── node.proto │ │ │ ├── number.proto │ │ │ ├── path.proto │ │ │ ├── regex.proto │ │ │ ├── string.proto │ │ │ ├── struct.proto │ │ │ └── value.proto │ │ └── value.proto │ ├── metadata/ │ │ ├── v2/ │ │ │ ├── BUILD │ │ │ └── metadata.proto │ │ └── v3/ │ │ ├── BUILD │ │ └── metadata.proto │ ├── percent.proto │ ├── range.proto │ ├── semantic_version.proto │ ├── token_bucket.proto │ ├── tracing/ │ │ ├── v2/ │ │ │ ├── BUILD │ │ │ └── custom_tag.proto │ │ └── v3/ │ │ ├── BUILD │ │ └── custom_tag.proto │ └── v3/ │ ├── BUILD │ ├── hash_policy.proto │ ├── http.proto │ ├── http_status.proto │ ├── percent.proto │ ├── range.proto │ ├── ratelimit_unit.proto │ ├── semantic_version.proto │ └── token_bucket.proto ├── include/ │ └── envoy/ │ ├── access_log/ │ │ ├── BUILD │ │ └── access_log.h │ ├── api/ │ │ ├── BUILD │ │ ├── api.h │ │ ├── io_error.h │ │ ├── 
os_sys_calls.h │ │ ├── os_sys_calls_common.h │ │ ├── os_sys_calls_hot_restart.h │ │ └── os_sys_calls_linux.h │ ├── buffer/ │ │ ├── BUILD │ │ └── buffer.h │ ├── common/ │ │ ├── BUILD │ │ ├── backoff_strategy.h │ │ ├── callback.h │ │ ├── conn_pool.h │ │ ├── crypto/ │ │ │ ├── BUILD │ │ │ └── crypto.h │ │ ├── exception.h │ │ ├── interval_set.h │ │ ├── matchers.h │ │ ├── mutex_tracer.h │ │ ├── platform.h │ │ ├── pure.h │ │ ├── random_generator.h │ │ ├── regex.h │ │ ├── resource.h │ │ ├── scope_tracker.h │ │ ├── time.h │ │ └── token_bucket.h │ ├── compression/ │ │ ├── compressor/ │ │ │ ├── BUILD │ │ │ ├── compressor.h │ │ │ ├── config.h │ │ │ └── factory.h │ │ └── decompressor/ │ │ ├── BUILD │ │ ├── config.h │ │ ├── decompressor.h │ │ └── factory.h │ ├── config/ │ │ ├── BUILD │ │ ├── config_provider.h │ │ ├── config_provider_manager.h │ │ ├── extension_config_provider.h │ │ ├── grpc_mux.h │ │ ├── subscription.h │ │ ├── subscription_factory.h │ │ ├── typed_config.h │ │ └── typed_metadata.h │ ├── event/ │ │ ├── BUILD │ │ ├── deferred_deletable.h │ │ ├── dispatcher.h │ │ ├── file_event.h │ │ ├── range_timer.h │ │ ├── schedulable_cb.h │ │ ├── signal.h │ │ └── timer.h │ ├── filesystem/ │ │ ├── BUILD │ │ ├── filesystem.h │ │ └── watcher.h │ ├── filter/ │ │ └── http/ │ │ ├── BUILD │ │ └── filter_config_provider.h │ ├── formatter/ │ │ ├── BUILD │ │ └── substitution_formatter.h │ ├── grpc/ │ │ ├── BUILD │ │ ├── async_client.h │ │ ├── async_client_manager.h │ │ ├── context.h │ │ ├── google_grpc_creds.h │ │ └── status.h │ ├── http/ │ │ ├── BUILD │ │ ├── api_listener.h │ │ ├── async_client.h │ │ ├── codec.h │ │ ├── codes.h │ │ ├── conn_pool.h │ │ ├── context.h │ │ ├── filter.h │ │ ├── hash_policy.h │ │ ├── header_map.h │ │ ├── message.h │ │ ├── metadata_interface.h │ │ ├── protocol.h │ │ ├── query_params.h │ │ └── request_id_extension.h │ ├── init/ │ │ ├── BUILD │ │ ├── manager.h │ │ ├── target.h │ │ └── watcher.h │ ├── json/ │ │ ├── BUILD │ │ └── json_object.h │ ├── local_info/ │ │ 
├── BUILD │ │ └── local_info.h │ ├── network/ │ │ ├── BUILD │ │ ├── address.h │ │ ├── connection.h │ │ ├── connection_balancer.h │ │ ├── connection_handler.h │ │ ├── dns.h │ │ ├── drain_decision.h │ │ ├── exception.h │ │ ├── filter.h │ │ ├── hash_policy.h │ │ ├── io_handle.h │ │ ├── listen_socket.h │ │ ├── listener.h │ │ ├── post_io_action.h │ │ ├── proxy_protocol.h │ │ ├── resolver.h │ │ ├── socket.h │ │ ├── socket_interface.h │ │ ├── transport_socket.h │ │ ├── udp_packet_writer_config.h │ │ └── udp_packet_writer_handler.h │ ├── protobuf/ │ │ ├── BUILD │ │ └── message_validator.h │ ├── ratelimit/ │ │ ├── BUILD │ │ └── ratelimit.h │ ├── registry/ │ │ ├── BUILD │ │ └── registry.h │ ├── router/ │ │ ├── BUILD │ │ ├── internal_redirect.h │ │ ├── rds.h │ │ ├── route_config_provider_manager.h │ │ ├── route_config_update_receiver.h │ │ ├── router.h │ │ ├── router_ratelimit.h │ │ ├── scopes.h │ │ ├── shadow_writer.h │ │ └── string_accessor.h │ ├── runtime/ │ │ ├── BUILD │ │ └── runtime.h │ ├── secret/ │ │ ├── BUILD │ │ ├── secret_callbacks.h │ │ ├── secret_manager.h │ │ └── secret_provider.h │ ├── server/ │ │ ├── BUILD │ │ ├── access_log_config.h │ │ ├── active_udp_listener_config.h │ │ ├── admin.h │ │ ├── api_listener.h │ │ ├── bootstrap_extension_config.h │ │ ├── config_tracker.h │ │ ├── configuration.h │ │ ├── drain_manager.h │ │ ├── factory_context.h │ │ ├── filter_config.h │ │ ├── guarddog.h │ │ ├── guarddog_config.h │ │ ├── health_checker_config.h │ │ ├── hot_restart.h │ │ ├── instance.h │ │ ├── lifecycle_notifier.h │ │ ├── listener_manager.h │ │ ├── options.h │ │ ├── overload_manager.h │ │ ├── process_context.h │ │ ├── request_id_extension_config.h │ │ ├── resource_monitor.h │ │ ├── resource_monitor_config.h │ │ ├── tracer_config.h │ │ ├── transport_socket_config.h │ │ ├── watchdog.h │ │ └── worker.h │ ├── singleton/ │ │ ├── BUILD │ │ ├── instance.h │ │ └── manager.h │ ├── ssl/ │ │ ├── BUILD │ │ ├── certificate_validation_context_config.h │ │ ├── connection.h │ │ 
├── context.h │ │ ├── context_config.h │ │ ├── context_manager.h │ │ ├── handshaker.h │ │ ├── private_key/ │ │ │ ├── BUILD │ │ │ ├── private_key.h │ │ │ ├── private_key_callbacks.h │ │ │ └── private_key_config.h │ │ ├── ssl_socket_extended_info.h │ │ ├── ssl_socket_state.h │ │ └── tls_certificate_config.h │ ├── stats/ │ │ ├── BUILD │ │ ├── allocator.h │ │ ├── histogram.h │ │ ├── primitive_stats.h │ │ ├── primitive_stats_macros.h │ │ ├── refcount_ptr.h │ │ ├── scope.h │ │ ├── sink.h │ │ ├── stats.h │ │ ├── stats_macros.h │ │ ├── stats_matcher.h │ │ ├── store.h │ │ ├── symbol_table.h │ │ ├── tag.h │ │ ├── tag_extractor.h │ │ ├── tag_producer.h │ │ └── timespan.h │ ├── stream_info/ │ │ ├── BUILD │ │ ├── filter_state.h │ │ ├── stream_info.h │ │ └── uint32_accessor.h │ ├── tcp/ │ │ ├── BUILD │ │ └── conn_pool.h │ ├── thread/ │ │ ├── BUILD │ │ └── thread.h │ ├── thread_local/ │ │ ├── BUILD │ │ └── thread_local.h │ ├── tracing/ │ │ ├── BUILD │ │ ├── http_tracer.h │ │ └── http_tracer_manager.h │ ├── udp/ │ │ ├── BUILD │ │ └── hash_policy.h │ └── upstream/ │ ├── BUILD │ ├── cluster_factory.h │ ├── cluster_manager.h │ ├── health_check_host_monitor.h │ ├── health_checker.h │ ├── host_description.h │ ├── load_balancer.h │ ├── load_balancer_type.h │ ├── locality.h │ ├── outlier_detection.h │ ├── resource_manager.h │ ├── retry.h │ ├── thread_local_cluster.h │ ├── types.h │ └── upstream.h ├── repokitteh.star ├── restarter/ │ ├── BUILD │ └── hot-restarter.py ├── security/ │ ├── email-templates.md │ ├── gh-cve-template.md │ ├── postmortem-template.md │ └── postmortems/ │ ├── cve-2019-15225.md │ ├── cve-2019-15226.md │ ├── cve-2019-9900.md │ └── cve-2019-9901.md ├── source/ │ ├── common/ │ │ ├── access_log/ │ │ │ ├── BUILD │ │ │ ├── access_log_impl.cc │ │ │ ├── access_log_impl.h │ │ │ ├── access_log_manager_impl.cc │ │ │ └── access_log_manager_impl.h │ │ ├── api/ │ │ │ ├── BUILD │ │ │ ├── api_impl.cc │ │ │ ├── api_impl.h │ │ │ ├── posix/ │ │ │ │ ├── os_sys_calls_impl.cc │ │ │ │ ├── 
os_sys_calls_impl.h │ │ │ │ ├── os_sys_calls_impl_hot_restart.cc │ │ │ │ ├── os_sys_calls_impl_hot_restart.h │ │ │ │ ├── os_sys_calls_impl_linux.cc │ │ │ │ └── os_sys_calls_impl_linux.h │ │ │ └── win32/ │ │ │ ├── os_sys_calls_impl.cc │ │ │ └── os_sys_calls_impl.h │ │ ├── buffer/ │ │ │ ├── BUILD │ │ │ ├── buffer_impl.cc │ │ │ ├── buffer_impl.h │ │ │ ├── watermark_buffer.cc │ │ │ ├── watermark_buffer.h │ │ │ ├── zero_copy_input_stream_impl.cc │ │ │ └── zero_copy_input_stream_impl.h │ │ ├── chromium_url/ │ │ │ ├── BUILD │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── envoy_shim.h │ │ │ ├── url_canon.cc │ │ │ ├── url_canon.h │ │ │ ├── url_canon_internal.cc │ │ │ ├── url_canon_internal.h │ │ │ ├── url_canon_path.cc │ │ │ ├── url_canon_stdstring.cc │ │ │ ├── url_canon_stdstring.h │ │ │ ├── url_parse.h │ │ │ └── url_parse_internal.h │ │ ├── common/ │ │ │ ├── BUILD │ │ │ ├── android/ │ │ │ │ ├── logger_impl.cc │ │ │ │ └── logger_impl.h │ │ │ ├── assert.cc │ │ │ ├── assert.h │ │ │ ├── backoff_strategy.cc │ │ │ ├── backoff_strategy.h │ │ │ ├── base64.cc │ │ │ ├── base64.h │ │ │ ├── base_logger.cc │ │ │ ├── base_logger.h │ │ │ ├── basic_resource_impl.h │ │ │ ├── byte_order.h │ │ │ ├── c_smart_ptr.h │ │ │ ├── callback_impl.h │ │ │ ├── cleanup.h │ │ │ ├── compiler_requirements.h │ │ │ ├── debug_recursion_checker.h │ │ │ ├── documentation_url.h │ │ │ ├── dump_state_utils.h │ │ │ ├── empty_string.h │ │ │ ├── enum_to_int.h │ │ │ ├── fancy_logger.cc │ │ │ ├── fancy_logger.h │ │ │ ├── fmt.h │ │ │ ├── hash.cc │ │ │ ├── hash.h │ │ │ ├── hex.cc │ │ │ ├── hex.h │ │ │ ├── linked_object.h │ │ │ ├── lock_guard.h │ │ │ ├── logger.cc │ │ │ ├── logger.h │ │ │ ├── logger_delegates.cc │ │ │ ├── logger_delegates.h │ │ │ ├── macros.h │ │ │ ├── matchers.cc │ │ │ ├── matchers.h │ │ │ ├── mem_block_builder.h │ │ │ ├── mutex_tracer_impl.cc │ │ │ ├── mutex_tracer_impl.h │ │ │ ├── non_copyable.h │ │ │ ├── perf_annotation.cc │ │ │ ├── perf_annotation.h │ │ │ ├── phantom.h │ │ │ ├── posix/ │ │ │ │ ├── 
thread_impl.cc │ │ │ │ └── thread_impl.h │ │ │ ├── random_generator.cc │ │ │ ├── random_generator.h │ │ │ ├── regex.cc │ │ │ ├── regex.h │ │ │ ├── scalar_to_byte_vector.h │ │ │ ├── scope_tracker.h │ │ │ ├── standard/ │ │ │ │ └── logger_impl.h │ │ │ ├── statusor.h │ │ │ ├── stl_helpers.h │ │ │ ├── thread.h │ │ │ ├── thread_annotations.h │ │ │ ├── thread_synchronizer.cc │ │ │ ├── thread_synchronizer.h │ │ │ ├── token_bucket_impl.cc │ │ │ ├── token_bucket_impl.h │ │ │ ├── utility.cc │ │ │ ├── utility.h │ │ │ └── win32/ │ │ │ ├── thread_impl.cc │ │ │ └── thread_impl.h │ │ ├── config/ │ │ │ ├── BUILD │ │ │ ├── README.md │ │ │ ├── api_type_oracle.cc │ │ │ ├── api_type_oracle.h │ │ │ ├── api_version.h │ │ │ ├── config_provider_impl.cc │ │ │ ├── config_provider_impl.h │ │ │ ├── datasource.cc │ │ │ ├── datasource.h │ │ │ ├── decoded_resource_impl.h │ │ │ ├── delta_subscription_state.cc │ │ │ ├── delta_subscription_state.h │ │ │ ├── filesystem_subscription_impl.cc │ │ │ ├── filesystem_subscription_impl.h │ │ │ ├── grpc_mux_impl.cc │ │ │ ├── grpc_mux_impl.h │ │ │ ├── grpc_stream.h │ │ │ ├── grpc_subscription_impl.cc │ │ │ ├── grpc_subscription_impl.h │ │ │ ├── http_subscription_impl.cc │ │ │ ├── http_subscription_impl.h │ │ │ ├── metadata.cc │ │ │ ├── metadata.h │ │ │ ├── new_grpc_mux_impl.cc │ │ │ ├── new_grpc_mux_impl.h │ │ │ ├── opaque_resource_decoder_impl.h │ │ │ ├── pausable_ack_queue.cc │ │ │ ├── pausable_ack_queue.h │ │ │ ├── protobuf_link_hacks.h │ │ │ ├── remote_data_fetcher.cc │ │ │ ├── remote_data_fetcher.h │ │ │ ├── resource_name.h │ │ │ ├── runtime_utility.cc │ │ │ ├── runtime_utility.h │ │ │ ├── subscription_base.h │ │ │ ├── subscription_factory_impl.cc │ │ │ ├── subscription_factory_impl.h │ │ │ ├── type_to_endpoint.cc │ │ │ ├── type_to_endpoint.h │ │ │ ├── udpa_context_params.cc │ │ │ ├── udpa_context_params.h │ │ │ ├── udpa_resource.cc │ │ │ ├── udpa_resource.h │ │ │ ├── update_ack.h │ │ │ ├── utility.cc │ │ │ ├── utility.h │ │ │ ├── version_converter.cc │ │ 
│ ├── version_converter.h │ │ │ ├── watch_map.cc │ │ │ ├── watch_map.h │ │ │ ├── well_known_names.cc │ │ │ └── well_known_names.h │ │ ├── conn_pool/ │ │ │ ├── BUILD │ │ │ ├── conn_pool_base.cc │ │ │ └── conn_pool_base.h │ │ ├── crypto/ │ │ │ ├── BUILD │ │ │ └── utility.h │ │ ├── event/ │ │ │ ├── BUILD │ │ │ ├── deferred_task.h │ │ │ ├── dispatcher_impl.cc │ │ │ ├── dispatcher_impl.h │ │ │ ├── event_impl_base.cc │ │ │ ├── event_impl_base.h │ │ │ ├── file_event_impl.cc │ │ │ ├── file_event_impl.h │ │ │ ├── libevent.cc │ │ │ ├── libevent.h │ │ │ ├── libevent_scheduler.cc │ │ │ ├── libevent_scheduler.h │ │ │ ├── real_time_system.cc │ │ │ ├── real_time_system.h │ │ │ ├── scaled_range_timer_manager.cc │ │ │ ├── scaled_range_timer_manager.h │ │ │ ├── schedulable_cb_impl.cc │ │ │ ├── schedulable_cb_impl.h │ │ │ ├── signal_impl.cc │ │ │ ├── signal_impl.h │ │ │ ├── timer_impl.cc │ │ │ └── timer_impl.h │ │ ├── filesystem/ │ │ │ ├── BUILD │ │ │ ├── directory.h │ │ │ ├── file_shared_impl.cc │ │ │ ├── file_shared_impl.h │ │ │ ├── inotify/ │ │ │ │ ├── watcher_impl.cc │ │ │ │ └── watcher_impl.h │ │ │ ├── kqueue/ │ │ │ │ ├── watcher_impl.cc │ │ │ │ └── watcher_impl.h │ │ │ ├── posix/ │ │ │ │ ├── directory_iterator_impl.cc │ │ │ │ ├── directory_iterator_impl.h │ │ │ │ ├── filesystem_impl.cc │ │ │ │ └── filesystem_impl.h │ │ │ └── win32/ │ │ │ ├── directory_iterator_impl.cc │ │ │ ├── directory_iterator_impl.h │ │ │ ├── filesystem_impl.cc │ │ │ ├── filesystem_impl.h │ │ │ ├── watcher_impl.cc │ │ │ └── watcher_impl.h │ │ ├── filter/ │ │ │ └── http/ │ │ │ ├── BUILD │ │ │ ├── filter_config_discovery_impl.cc │ │ │ └── filter_config_discovery_impl.h │ │ ├── formatter/ │ │ │ ├── BUILD │ │ │ ├── substitution_format_string.cc │ │ │ ├── substitution_format_string.h │ │ │ ├── substitution_formatter.cc │ │ │ └── substitution_formatter.h │ │ ├── grpc/ │ │ │ ├── BUILD │ │ │ ├── async_client_impl.cc │ │ │ ├── async_client_impl.h │ │ │ ├── async_client_manager_impl.cc │ │ │ ├── 
async_client_manager_impl.h │ │ │ ├── codec.cc │ │ │ ├── codec.h │ │ │ ├── common.cc │ │ │ ├── common.h │ │ │ ├── context_impl.cc │ │ │ ├── context_impl.h │ │ │ ├── google_async_client_impl.cc │ │ │ ├── google_async_client_impl.h │ │ │ ├── google_grpc_context.cc │ │ │ ├── google_grpc_context.h │ │ │ ├── google_grpc_creds_impl.cc │ │ │ ├── google_grpc_creds_impl.h │ │ │ ├── google_grpc_utils.cc │ │ │ ├── google_grpc_utils.h │ │ │ ├── stat_names.cc │ │ │ ├── stat_names.h │ │ │ ├── status.cc │ │ │ ├── status.h │ │ │ ├── typed_async_client.cc │ │ │ └── typed_async_client.h │ │ ├── html/ │ │ │ ├── BUILD │ │ │ ├── utility.cc │ │ │ └── utility.h │ │ ├── http/ │ │ │ ├── BUILD │ │ │ ├── async_client_impl.cc │ │ │ ├── async_client_impl.h │ │ │ ├── async_client_utility.cc │ │ │ ├── async_client_utility.h │ │ │ ├── codec_client.cc │ │ │ ├── codec_client.h │ │ │ ├── codec_helper.h │ │ │ ├── codec_wrappers.h │ │ │ ├── codes.cc │ │ │ ├── codes.h │ │ │ ├── conn_manager_config.h │ │ │ ├── conn_manager_impl.cc │ │ │ ├── conn_manager_impl.h │ │ │ ├── conn_manager_utility.cc │ │ │ ├── conn_manager_utility.h │ │ │ ├── conn_pool_base.cc │ │ │ ├── conn_pool_base.h │ │ │ ├── context_impl.cc │ │ │ ├── context_impl.h │ │ │ ├── date_provider.h │ │ │ ├── date_provider_impl.cc │ │ │ ├── date_provider_impl.h │ │ │ ├── default_server_string.h │ │ │ ├── exception.h │ │ │ ├── filter_manager.cc │ │ │ ├── filter_manager.h │ │ │ ├── hash_policy.cc │ │ │ ├── hash_policy.h │ │ │ ├── header_list_view.cc │ │ │ ├── header_list_view.h │ │ │ ├── header_map_impl.cc │ │ │ ├── header_map_impl.h │ │ │ ├── header_utility.cc │ │ │ ├── header_utility.h │ │ │ ├── headers.h │ │ │ ├── http1/ │ │ │ │ ├── BUILD │ │ │ │ ├── codec_impl.cc │ │ │ │ ├── codec_impl.h │ │ │ │ ├── codec_impl_legacy.cc │ │ │ │ ├── codec_impl_legacy.h │ │ │ │ ├── codec_stats.h │ │ │ │ ├── conn_pool.cc │ │ │ │ ├── conn_pool.h │ │ │ │ ├── header_formatter.cc │ │ │ │ └── header_formatter.h │ │ │ ├── http2/ │ │ │ │ ├── BUILD │ │ │ │ ├── 
codec_impl.cc │ │ │ │ ├── codec_impl.h │ │ │ │ ├── codec_impl_legacy.cc │ │ │ │ ├── codec_impl_legacy.h │ │ │ │ ├── codec_stats.h │ │ │ │ ├── conn_pool.cc │ │ │ │ ├── conn_pool.h │ │ │ │ ├── metadata_decoder.cc │ │ │ │ ├── metadata_decoder.h │ │ │ │ ├── metadata_encoder.cc │ │ │ │ ├── metadata_encoder.h │ │ │ │ ├── nghttp2.cc │ │ │ │ ├── nghttp2.h │ │ │ │ ├── protocol_constraints.cc │ │ │ │ └── protocol_constraints.h │ │ │ ├── http3/ │ │ │ │ ├── BUILD │ │ │ │ ├── quic_codec_factory.h │ │ │ │ └── well_known_names.h │ │ │ ├── message_impl.h │ │ │ ├── path_utility.cc │ │ │ ├── path_utility.h │ │ │ ├── request_id_extension_impl.cc │ │ │ ├── request_id_extension_impl.h │ │ │ ├── request_id_extension_uuid_impl.cc │ │ │ ├── request_id_extension_uuid_impl.h │ │ │ ├── rest_api_fetcher.cc │ │ │ ├── rest_api_fetcher.h │ │ │ ├── status.cc │ │ │ ├── status.h │ │ │ ├── user_agent.cc │ │ │ ├── user_agent.h │ │ │ ├── utility.cc │ │ │ └── utility.h │ │ ├── init/ │ │ │ ├── BUILD │ │ │ ├── manager_impl.cc │ │ │ ├── manager_impl.h │ │ │ ├── target_impl.cc │ │ │ ├── target_impl.h │ │ │ ├── watcher_impl.cc │ │ │ └── watcher_impl.h │ │ ├── json/ │ │ │ ├── BUILD │ │ │ ├── json_loader.cc │ │ │ └── json_loader.h │ │ ├── local_info/ │ │ │ ├── BUILD │ │ │ └── local_info_impl.h │ │ ├── local_reply/ │ │ │ ├── BUILD │ │ │ ├── local_reply.cc │ │ │ └── local_reply.h │ │ ├── memory/ │ │ │ ├── BUILD │ │ │ ├── heap_shrinker.cc │ │ │ ├── heap_shrinker.h │ │ │ ├── stats.cc │ │ │ ├── stats.h │ │ │ ├── utils.cc │ │ │ └── utils.h │ │ ├── network/ │ │ │ ├── BUILD │ │ │ ├── addr_family_aware_socket_option_impl.cc │ │ │ ├── addr_family_aware_socket_option_impl.h │ │ │ ├── address_impl.cc │ │ │ ├── address_impl.h │ │ │ ├── apple_dns_impl.cc │ │ │ ├── apple_dns_impl.h │ │ │ ├── application_protocol.cc │ │ │ ├── application_protocol.h │ │ │ ├── base_listener_impl.cc │ │ │ ├── base_listener_impl.h │ │ │ ├── cidr_range.cc │ │ │ ├── cidr_range.h │ │ │ ├── connection_balancer_impl.cc │ │ │ ├── 
connection_balancer_impl.h │ │ │ ├── connection_impl.cc │ │ │ ├── connection_impl.h │ │ │ ├── connection_impl_base.cc │ │ │ ├── connection_impl_base.h │ │ │ ├── dns_impl.cc │ │ │ ├── dns_impl.h │ │ │ ├── filter_impl.h │ │ │ ├── filter_manager_impl.cc │ │ │ ├── filter_manager_impl.h │ │ │ ├── filter_matcher.cc │ │ │ ├── filter_matcher.h │ │ │ ├── hash_policy.cc │ │ │ ├── hash_policy.h │ │ │ ├── io_socket_error_impl.cc │ │ │ ├── io_socket_error_impl.h │ │ │ ├── io_socket_handle_impl.cc │ │ │ ├── io_socket_handle_impl.h │ │ │ ├── lc_trie.h │ │ │ ├── listen_socket_impl.cc │ │ │ ├── listen_socket_impl.h │ │ │ ├── proxy_protocol_filter_state.cc │ │ │ ├── proxy_protocol_filter_state.h │ │ │ ├── raw_buffer_socket.cc │ │ │ ├── raw_buffer_socket.h │ │ │ ├── resolver_impl.cc │ │ │ ├── resolver_impl.h │ │ │ ├── socket_impl.cc │ │ │ ├── socket_impl.h │ │ │ ├── socket_interface.h │ │ │ ├── socket_interface_impl.cc │ │ │ ├── socket_interface_impl.h │ │ │ ├── socket_option_factory.cc │ │ │ ├── socket_option_factory.h │ │ │ ├── socket_option_impl.cc │ │ │ ├── socket_option_impl.h │ │ │ ├── tcp_listener_impl.cc │ │ │ ├── tcp_listener_impl.h │ │ │ ├── transport_socket_options_impl.cc │ │ │ ├── transport_socket_options_impl.h │ │ │ ├── udp_default_writer_config.cc │ │ │ ├── udp_default_writer_config.h │ │ │ ├── udp_listener_impl.cc │ │ │ ├── udp_listener_impl.h │ │ │ ├── udp_packet_writer_handler_impl.cc │ │ │ ├── udp_packet_writer_handler_impl.h │ │ │ ├── upstream_server_name.cc │ │ │ ├── upstream_server_name.h │ │ │ ├── upstream_subject_alt_names.cc │ │ │ ├── upstream_subject_alt_names.h │ │ │ ├── utility.cc │ │ │ └── utility.h │ │ ├── profiler/ │ │ │ ├── BUILD │ │ │ ├── profiler.cc │ │ │ └── profiler.h │ │ ├── protobuf/ │ │ │ ├── BUILD │ │ │ ├── message_validator_impl.cc │ │ │ ├── message_validator_impl.h │ │ │ ├── protobuf.h │ │ │ ├── type_util.cc │ │ │ ├── type_util.h │ │ │ ├── utility.cc │ │ │ ├── utility.h │ │ │ ├── visitor.cc │ │ │ ├── visitor.h │ │ │ └── well_known.h │ │ ├── 
router/ │ │ │ ├── BUILD │ │ │ ├── config_impl.cc │ │ │ ├── config_impl.h │ │ │ ├── config_utility.cc │ │ │ ├── config_utility.h │ │ │ ├── debug_config.cc │ │ │ ├── debug_config.h │ │ │ ├── header_formatter.cc │ │ │ ├── header_formatter.h │ │ │ ├── header_parser.cc │ │ │ ├── header_parser.h │ │ │ ├── metadatamatchcriteria_impl.cc │ │ │ ├── metadatamatchcriteria_impl.h │ │ │ ├── rds_impl.cc │ │ │ ├── rds_impl.h │ │ │ ├── reset_header_parser.cc │ │ │ ├── reset_header_parser.h │ │ │ ├── retry_state_impl.cc │ │ │ ├── retry_state_impl.h │ │ │ ├── route_config_update_receiver_impl.cc │ │ │ ├── route_config_update_receiver_impl.h │ │ │ ├── router.cc │ │ │ ├── router.h │ │ │ ├── router_ratelimit.cc │ │ │ ├── router_ratelimit.h │ │ │ ├── scoped_config_impl.cc │ │ │ ├── scoped_config_impl.h │ │ │ ├── scoped_rds.cc │ │ │ ├── scoped_rds.h │ │ │ ├── shadow_writer_impl.cc │ │ │ ├── shadow_writer_impl.h │ │ │ ├── string_accessor_impl.h │ │ │ ├── tls_context_match_criteria_impl.cc │ │ │ ├── tls_context_match_criteria_impl.h │ │ │ ├── upstream_request.cc │ │ │ ├── upstream_request.h │ │ │ ├── vhds.cc │ │ │ └── vhds.h │ │ ├── runtime/ │ │ │ ├── BUILD │ │ │ ├── runtime_features.cc │ │ │ ├── runtime_features.h │ │ │ ├── runtime_impl.cc │ │ │ ├── runtime_impl.h │ │ │ └── runtime_protos.h │ │ ├── secret/ │ │ │ ├── BUILD │ │ │ ├── sds_api.cc │ │ │ ├── sds_api.h │ │ │ ├── secret_manager_impl.cc │ │ │ ├── secret_manager_impl.h │ │ │ ├── secret_provider_impl.cc │ │ │ └── secret_provider_impl.h │ │ ├── shared_pool/ │ │ │ ├── BUILD │ │ │ └── shared_pool.h │ │ ├── signal/ │ │ │ ├── BUILD │ │ │ ├── fatal_error_handler.cc │ │ │ ├── fatal_error_handler.h │ │ │ ├── signal_action.cc │ │ │ └── signal_action.h │ │ ├── singleton/ │ │ │ ├── BUILD │ │ │ ├── const_singleton.h │ │ │ ├── manager_impl.cc │ │ │ ├── manager_impl.h │ │ │ └── threadsafe_singleton.h │ │ ├── ssl/ │ │ │ ├── BUILD │ │ │ ├── certificate_validation_context_config_impl.cc │ │ │ ├── certificate_validation_context_config_impl.h │ │ │ ├── 
tls_certificate_config_impl.cc │ │ │ └── tls_certificate_config_impl.h │ │ ├── stats/ │ │ │ ├── BUILD │ │ │ ├── allocator_impl.cc │ │ │ ├── allocator_impl.h │ │ │ ├── histogram_impl.cc │ │ │ ├── histogram_impl.h │ │ │ ├── isolated_store_impl.cc │ │ │ ├── isolated_store_impl.h │ │ │ ├── metric_impl.cc │ │ │ ├── metric_impl.h │ │ │ ├── null_counter.h │ │ │ ├── null_gauge.h │ │ │ ├── null_text_readout.h │ │ │ ├── recent_lookups.cc │ │ │ ├── recent_lookups.h │ │ │ ├── scope_prefixer.cc │ │ │ ├── scope_prefixer.h │ │ │ ├── stat_merger.cc │ │ │ ├── stat_merger.h │ │ │ ├── stats_matcher_impl.cc │ │ │ ├── stats_matcher_impl.h │ │ │ ├── store_impl.h │ │ │ ├── symbol_table_impl.cc │ │ │ ├── symbol_table_impl.h │ │ │ ├── tag_extractor_impl.cc │ │ │ ├── tag_extractor_impl.h │ │ │ ├── tag_producer_impl.cc │ │ │ ├── tag_producer_impl.h │ │ │ ├── tag_utility.cc │ │ │ ├── tag_utility.h │ │ │ ├── thread_local_store.cc │ │ │ ├── thread_local_store.h │ │ │ ├── timespan_impl.cc │ │ │ ├── timespan_impl.h │ │ │ ├── utility.cc │ │ │ └── utility.h │ │ ├── stream_info/ │ │ │ ├── BUILD │ │ │ ├── filter_state_impl.cc │ │ │ ├── filter_state_impl.h │ │ │ ├── stream_info_impl.h │ │ │ ├── uint32_accessor_impl.h │ │ │ ├── utility.cc │ │ │ └── utility.h │ │ ├── tcp/ │ │ │ ├── BUILD │ │ │ ├── conn_pool.cc │ │ │ ├── conn_pool.h │ │ │ ├── original_conn_pool.cc │ │ │ └── original_conn_pool.h │ │ ├── tcp_proxy/ │ │ │ ├── BUILD │ │ │ ├── tcp_proxy.cc │ │ │ ├── tcp_proxy.h │ │ │ ├── upstream.cc │ │ │ └── upstream.h │ │ ├── thread_local/ │ │ │ ├── BUILD │ │ │ ├── thread_local_impl.cc │ │ │ └── thread_local_impl.h │ │ ├── tracing/ │ │ │ ├── BUILD │ │ │ ├── http_tracer_config_impl.h │ │ │ ├── http_tracer_impl.cc │ │ │ ├── http_tracer_impl.h │ │ │ ├── http_tracer_manager_impl.cc │ │ │ └── http_tracer_manager_impl.h │ │ ├── upstream/ │ │ │ ├── BUILD │ │ │ ├── cds_api_impl.cc │ │ │ ├── cds_api_impl.h │ │ │ ├── cluster_factory_impl.cc │ │ │ ├── cluster_factory_impl.h │ │ │ ├── cluster_manager_impl.cc │ │ │ ├── 
cluster_manager_impl.h │ │ │ ├── cluster_update_tracker.cc │ │ │ ├── cluster_update_tracker.h │ │ │ ├── conn_pool_map.h │ │ │ ├── conn_pool_map_impl.h │ │ │ ├── edf_scheduler.h │ │ │ ├── eds.cc │ │ │ ├── eds.h │ │ │ ├── health_checker_base_impl.cc │ │ │ ├── health_checker_base_impl.h │ │ │ ├── health_checker_impl.cc │ │ │ ├── health_checker_impl.h │ │ │ ├── health_discovery_service.cc │ │ │ ├── health_discovery_service.h │ │ │ ├── host_utility.cc │ │ │ ├── host_utility.h │ │ │ ├── load_balancer_impl.cc │ │ │ ├── load_balancer_impl.h │ │ │ ├── load_stats_reporter.cc │ │ │ ├── load_stats_reporter.h │ │ │ ├── logical_dns_cluster.cc │ │ │ ├── logical_dns_cluster.h │ │ │ ├── logical_host.cc │ │ │ ├── logical_host.h │ │ │ ├── maglev_lb.cc │ │ │ ├── maglev_lb.h │ │ │ ├── original_dst_cluster.cc │ │ │ ├── original_dst_cluster.h │ │ │ ├── outlier_detection_impl.cc │ │ │ ├── outlier_detection_impl.h │ │ │ ├── priority_conn_pool_map.h │ │ │ ├── priority_conn_pool_map_impl.h │ │ │ ├── resource_manager_impl.h │ │ │ ├── ring_hash_lb.cc │ │ │ ├── ring_hash_lb.h │ │ │ ├── static_cluster.cc │ │ │ ├── static_cluster.h │ │ │ ├── strict_dns_cluster.cc │ │ │ ├── strict_dns_cluster.h │ │ │ ├── subset_lb.cc │ │ │ ├── subset_lb.h │ │ │ ├── thread_aware_lb_impl.cc │ │ │ ├── thread_aware_lb_impl.h │ │ │ ├── transport_socket_match_impl.cc │ │ │ ├── transport_socket_match_impl.h │ │ │ ├── upstream_impl.cc │ │ │ └── upstream_impl.h │ │ └── version/ │ │ ├── BUILD │ │ ├── generate_version_linkstamp.sh │ │ ├── version.cc │ │ ├── version.h │ │ └── version_linkstamp.cc │ ├── docs/ │ │ ├── fancy_logger.md │ │ ├── filters/ │ │ │ └── http/ │ │ │ └── cache/ │ │ │ ├── cache_filter.md │ │ │ └── cache_filter_plugins.md │ │ ├── flow_control.md │ │ ├── h2_metadata.md │ │ ├── header_map.md │ │ ├── network_filter_fuzzing.md │ │ ├── quiche_integration.md │ │ ├── repokitteh.md │ │ ├── stats.md │ │ └── subset_load_balancer.md │ ├── exe/ │ │ ├── BUILD │ │ ├── main.cc │ │ ├── main_common.cc │ │ ├── main_common.h │ 
│ ├── platform_impl.h │ │ ├── posix/ │ │ │ └── platform_impl.cc │ │ ├── process_wide.cc │ │ ├── process_wide.h │ │ ├── terminate_handler.cc │ │ ├── terminate_handler.h │ │ └── win32/ │ │ └── platform_impl.cc │ ├── extensions/ │ │ ├── BUILD │ │ ├── access_loggers/ │ │ │ ├── BUILD │ │ │ ├── common/ │ │ │ │ ├── BUILD │ │ │ │ ├── access_log_base.cc │ │ │ │ └── access_log_base.h │ │ │ ├── file/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── file_access_log_impl.cc │ │ │ │ └── file_access_log_impl.h │ │ │ ├── grpc/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_utils.cc │ │ │ │ ├── config_utils.h │ │ │ │ ├── grpc_access_log_impl.cc │ │ │ │ ├── grpc_access_log_impl.h │ │ │ │ ├── grpc_access_log_proto_descriptors.cc │ │ │ │ ├── grpc_access_log_proto_descriptors.h │ │ │ │ ├── grpc_access_log_utils.cc │ │ │ │ ├── grpc_access_log_utils.h │ │ │ │ ├── http_config.cc │ │ │ │ ├── http_config.h │ │ │ │ ├── http_grpc_access_log_impl.cc │ │ │ │ ├── http_grpc_access_log_impl.h │ │ │ │ ├── tcp_config.cc │ │ │ │ ├── tcp_config.h │ │ │ │ ├── tcp_grpc_access_log_impl.cc │ │ │ │ └── tcp_grpc_access_log_impl.h │ │ │ ├── wasm/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ └── wasm_access_log_impl.h │ │ │ └── well_known_names.h │ │ ├── all_extensions.bzl │ │ ├── bootstrap/ │ │ │ └── wasm/ │ │ │ ├── BUILD │ │ │ ├── config.cc │ │ │ └── config.h │ │ ├── clusters/ │ │ │ ├── BUILD │ │ │ ├── aggregate/ │ │ │ │ ├── BUILD │ │ │ │ ├── cluster.cc │ │ │ │ ├── cluster.h │ │ │ │ └── lb_context.h │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ ├── BUILD │ │ │ │ ├── cluster.cc │ │ │ │ └── cluster.h │ │ │ ├── redis/ │ │ │ │ ├── BUILD │ │ │ │ ├── crc16.cc │ │ │ │ ├── crc16.h │ │ │ │ ├── redis_cluster.cc │ │ │ │ ├── redis_cluster.h │ │ │ │ ├── redis_cluster_lb.cc │ │ │ │ └── redis_cluster_lb.h │ │ │ └── well_known_names.h │ │ ├── common/ │ │ │ ├── BUILD │ │ │ ├── aws/ │ │ │ │ ├── BUILD │ │ │ │ ├── credentials_provider.h │ │ │ │ ├── credentials_provider_impl.cc │ │ │ │ ├── 
credentials_provider_impl.h │ │ │ │ ├── region_provider.h │ │ │ │ ├── region_provider_impl.cc │ │ │ │ ├── region_provider_impl.h │ │ │ │ ├── signer.h │ │ │ │ ├── signer_impl.cc │ │ │ │ ├── signer_impl.h │ │ │ │ ├── utility.cc │ │ │ │ └── utility.h │ │ │ ├── crypto/ │ │ │ │ ├── BUILD │ │ │ │ ├── crypto_impl.cc │ │ │ │ ├── crypto_impl.h │ │ │ │ ├── utility_impl.cc │ │ │ │ └── utility_impl.h │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ ├── BUILD │ │ │ │ ├── dns_cache.h │ │ │ │ ├── dns_cache_impl.cc │ │ │ │ ├── dns_cache_impl.h │ │ │ │ ├── dns_cache_manager_impl.cc │ │ │ │ ├── dns_cache_manager_impl.h │ │ │ │ ├── dns_cache_resource_manager.cc │ │ │ │ └── dns_cache_resource_manager.h │ │ │ ├── matcher/ │ │ │ │ ├── BUILD │ │ │ │ ├── matcher.cc │ │ │ │ └── matcher.h │ │ │ ├── proxy_protocol/ │ │ │ │ ├── BUILD │ │ │ │ ├── proxy_protocol_header.cc │ │ │ │ └── proxy_protocol_header.h │ │ │ ├── redis/ │ │ │ │ ├── BUILD │ │ │ │ ├── cluster_refresh_manager.h │ │ │ │ ├── cluster_refresh_manager_impl.cc │ │ │ │ └── cluster_refresh_manager_impl.h │ │ │ ├── sqlutils/ │ │ │ │ ├── BUILD │ │ │ │ ├── sqlutils.cc │ │ │ │ └── sqlutils.h │ │ │ ├── tap/ │ │ │ │ ├── BUILD │ │ │ │ ├── admin.cc │ │ │ │ ├── admin.h │ │ │ │ ├── extension_config_base.cc │ │ │ │ ├── extension_config_base.h │ │ │ │ ├── tap.h │ │ │ │ ├── tap_config_base.cc │ │ │ │ └── tap_config_base.h │ │ │ ├── utility.h │ │ │ └── wasm/ │ │ │ ├── BUILD │ │ │ ├── context.cc │ │ │ ├── context.h │ │ │ ├── ext/ │ │ │ │ ├── BUILD │ │ │ │ ├── README.md │ │ │ │ ├── declare_property.proto │ │ │ │ ├── envoy_null_plugin.h │ │ │ │ ├── envoy_null_vm_wasm_api.h │ │ │ │ ├── envoy_proxy_wasm_api.cc │ │ │ │ ├── envoy_proxy_wasm_api.h │ │ │ │ ├── envoy_wasm_intrinsics.js │ │ │ │ └── node_subset.proto │ │ │ ├── foreign.cc │ │ │ ├── wasm.cc │ │ │ ├── wasm.h │ │ │ ├── wasm_extension.cc │ │ │ ├── wasm_extension.h │ │ │ ├── wasm_state.cc │ │ │ ├── wasm_state.h │ │ │ ├── wasm_vm.cc │ │ │ ├── wasm_vm.h │ │ │ ├── wasm_vm_base.h │ │ │ └── well_known_names.h │ 
│ ├── compression/ │ │ │ ├── common/ │ │ │ │ ├── compressor/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── factory_base.h │ │ │ │ └── decompressor/ │ │ │ │ ├── BUILD │ │ │ │ └── factory_base.h │ │ │ └── gzip/ │ │ │ ├── common/ │ │ │ │ ├── BUILD │ │ │ │ ├── base.cc │ │ │ │ └── base.h │ │ │ ├── compressor/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── zlib_compressor_impl.cc │ │ │ │ └── zlib_compressor_impl.h │ │ │ └── decompressor/ │ │ │ ├── BUILD │ │ │ ├── config.cc │ │ │ ├── config.h │ │ │ ├── zlib_decompressor_impl.cc │ │ │ └── zlib_decompressor_impl.h │ │ ├── extensions_build_config.bzl │ │ ├── filters/ │ │ │ ├── common/ │ │ │ │ ├── expr/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── context.cc │ │ │ │ │ ├── context.h │ │ │ │ │ ├── evaluator.cc │ │ │ │ │ └── evaluator.h │ │ │ │ ├── ext_authz/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── check_request_utils.cc │ │ │ │ │ ├── check_request_utils.h │ │ │ │ │ ├── ext_authz.h │ │ │ │ │ ├── ext_authz_grpc_impl.cc │ │ │ │ │ ├── ext_authz_grpc_impl.h │ │ │ │ │ ├── ext_authz_http_impl.cc │ │ │ │ │ └── ext_authz_http_impl.h │ │ │ │ ├── fault/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── fault_config.cc │ │ │ │ │ └── fault_config.h │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── local_ratelimit_impl.cc │ │ │ │ │ └── local_ratelimit_impl.h │ │ │ │ ├── lua/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── lua.cc │ │ │ │ │ ├── lua.h │ │ │ │ │ ├── wrappers.cc │ │ │ │ │ └── wrappers.h │ │ │ │ ├── original_src/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── original_src_socket_option.cc │ │ │ │ │ ├── original_src_socket_option.h │ │ │ │ │ ├── socket_option_factory.cc │ │ │ │ │ └── socket_option_factory.h │ │ │ │ ├── ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── ratelimit.h │ │ │ │ │ ├── ratelimit_impl.cc │ │ │ │ │ ├── ratelimit_impl.h │ │ │ │ │ └── stat_names.h │ │ │ │ └── rbac/ │ │ │ │ ├── BUILD │ │ │ │ ├── engine.h │ │ │ │ ├── engine_impl.cc │ │ │ │ ├── engine_impl.h │ │ │ │ ├── matchers.cc │ │ │ │ ├── matchers.h │ │ │ │ ├── utility.cc │ │ │ │ └── utility.h │ 
│ │ ├── http/ │ │ │ │ ├── BUILD │ │ │ │ ├── adaptive_concurrency/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── adaptive_concurrency_filter.cc │ │ │ │ │ ├── adaptive_concurrency_filter.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ └── controller/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── controller.h │ │ │ │ │ ├── gradient_controller.cc │ │ │ │ │ └── gradient_controller.h │ │ │ │ ├── admission_control/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── admission_control.cc │ │ │ │ │ ├── admission_control.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── evaluators/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── response_evaluator.h │ │ │ │ │ │ ├── success_criteria_evaluator.cc │ │ │ │ │ │ └── success_criteria_evaluator.h │ │ │ │ │ ├── thread_local_controller.cc │ │ │ │ │ └── thread_local_controller.h │ │ │ │ ├── aws_lambda/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── aws_lambda_filter.cc │ │ │ │ │ ├── aws_lambda_filter.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ └── request_response.proto │ │ │ │ ├── aws_request_signing/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── aws_request_signing_filter.cc │ │ │ │ │ ├── aws_request_signing_filter.h │ │ │ │ │ ├── config.cc │ │ │ │ │ └── config.h │ │ │ │ ├── buffer/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── buffer_filter.cc │ │ │ │ │ ├── buffer_filter.h │ │ │ │ │ ├── config.cc │ │ │ │ │ └── config.h │ │ │ │ ├── cache/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── cache_filter.cc │ │ │ │ │ ├── cache_filter.h │ │ │ │ │ ├── cache_headers_utils.cc │ │ │ │ │ ├── cache_headers_utils.h │ │ │ │ │ ├── cacheability_utils.cc │ │ │ │ │ ├── cacheability_utils.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── http_cache.cc │ │ │ │ │ ├── http_cache.h │ │ │ │ │ ├── inline_headers_handles.h │ │ │ │ │ ├── key.proto │ │ │ │ │ └── simple_http_cache/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.proto │ │ │ │ │ ├── simple_http_cache.cc │ │ │ │ │ └── simple_http_cache.h │ │ │ │ ├── cdn_loop/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── filter.cc │ │ │ 
│ │ ├── filter.h │ │ │ │ │ ├── parser.cc │ │ │ │ │ ├── parser.h │ │ │ │ │ ├── utils.cc │ │ │ │ │ └── utils.h │ │ │ │ ├── common/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── compressor/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── compressor.cc │ │ │ │ │ │ └── compressor.h │ │ │ │ │ ├── factory_base.h │ │ │ │ │ ├── jwks_fetcher.cc │ │ │ │ │ ├── jwks_fetcher.h │ │ │ │ │ ├── pass_through_filter.h │ │ │ │ │ └── utility.h │ │ │ │ ├── compressor/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── compressor_filter.cc │ │ │ │ │ ├── compressor_filter.h │ │ │ │ │ ├── config.cc │ │ │ │ │ └── config.h │ │ │ │ ├── cors/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── cors_filter.cc │ │ │ │ │ └── cors_filter.h │ │ │ │ ├── csrf/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── csrf_filter.cc │ │ │ │ │ └── csrf_filter.h │ │ │ │ ├── decompressor/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── decompressor_filter.cc │ │ │ │ │ └── decompressor_filter.h │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── proxy_filter.cc │ │ │ │ │ └── proxy_filter.h │ │ │ │ ├── dynamo/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── dynamo_filter.cc │ │ │ │ │ ├── dynamo_filter.h │ │ │ │ │ ├── dynamo_request_parser.cc │ │ │ │ │ ├── dynamo_request_parser.h │ │ │ │ │ ├── dynamo_stats.cc │ │ │ │ │ └── dynamo_stats.h │ │ │ │ ├── ext_authz/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── ext_authz.cc │ │ │ │ │ └── ext_authz.h │ │ │ │ ├── fault/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── fault_filter.cc │ │ │ │ │ └── fault_filter.h │ │ │ │ ├── grpc_http1_bridge/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── http1_bridge_filter.cc │ │ │ │ │ └── http1_bridge_filter.h │ │ │ │ ├── grpc_http1_reverse_bridge/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── 
config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── filter.cc │ │ │ │ │ └── filter.h │ │ │ │ ├── grpc_json_transcoder/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── http_body_utils.cc │ │ │ │ │ ├── http_body_utils.h │ │ │ │ │ ├── json_transcoder_filter.cc │ │ │ │ │ ├── json_transcoder_filter.h │ │ │ │ │ ├── transcoder_input_stream_impl.cc │ │ │ │ │ └── transcoder_input_stream_impl.h │ │ │ │ ├── grpc_stats/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── grpc_stats_filter.cc │ │ │ │ │ └── grpc_stats_filter.h │ │ │ │ ├── grpc_web/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── grpc_web_filter.cc │ │ │ │ │ └── grpc_web_filter.h │ │ │ │ ├── gzip/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── gzip_filter.cc │ │ │ │ │ └── gzip_filter.h │ │ │ │ ├── header_to_metadata/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── header_to_metadata_filter.cc │ │ │ │ │ └── header_to_metadata_filter.h │ │ │ │ ├── health_check/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── health_check.cc │ │ │ │ │ └── health_check.h │ │ │ │ ├── ip_tagging/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── ip_tagging_filter.cc │ │ │ │ │ └── ip_tagging_filter.h │ │ │ │ ├── jwt_authn/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── authenticator.cc │ │ │ │ │ ├── authenticator.h │ │ │ │ │ ├── extractor.cc │ │ │ │ │ ├── extractor.h │ │ │ │ │ ├── filter.cc │ │ │ │ │ ├── filter.h │ │ │ │ │ ├── filter_config.cc │ │ │ │ │ ├── filter_config.h │ │ │ │ │ ├── filter_factory.cc │ │ │ │ │ ├── filter_factory.h │ │ │ │ │ ├── jwks_cache.cc │ │ │ │ │ ├── jwks_cache.h │ │ │ │ │ ├── matcher.cc │ │ │ │ │ ├── matcher.h │ │ │ │ │ ├── verifier.cc │ │ │ │ │ └── verifier.h │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── local_ratelimit.cc │ │ │ │ │ └── local_ratelimit.h │ │ │ │ ├── lua/ │ │ │ │ │ ├── 
BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── lua_filter.cc │ │ │ │ │ ├── lua_filter.h │ │ │ │ │ ├── wrappers.cc │ │ │ │ │ └── wrappers.h │ │ │ │ ├── oauth2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── filter.cc │ │ │ │ │ ├── filter.h │ │ │ │ │ ├── oauth.h │ │ │ │ │ ├── oauth_client.cc │ │ │ │ │ ├── oauth_client.h │ │ │ │ │ └── oauth_response.proto │ │ │ │ ├── on_demand/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── on_demand_update.cc │ │ │ │ │ └── on_demand_update.h │ │ │ │ ├── original_src/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── original_src.cc │ │ │ │ │ ├── original_src.h │ │ │ │ │ ├── original_src_config_factory.cc │ │ │ │ │ └── original_src_config_factory.h │ │ │ │ ├── ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── ratelimit.cc │ │ │ │ │ ├── ratelimit.h │ │ │ │ │ ├── ratelimit_headers.cc │ │ │ │ │ └── ratelimit_headers.h │ │ │ │ ├── rbac/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── rbac_filter.cc │ │ │ │ │ └── rbac_filter.h │ │ │ │ ├── router/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ └── config.h │ │ │ │ ├── squash/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── squash_filter.cc │ │ │ │ │ └── squash_filter.h │ │ │ │ ├── tap/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── tap_config.h │ │ │ │ │ ├── tap_config_impl.cc │ │ │ │ │ ├── tap_config_impl.h │ │ │ │ │ ├── tap_filter.cc │ │ │ │ │ └── tap_filter.h │ │ │ │ ├── wasm/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── wasm_filter.cc │ │ │ │ │ └── wasm_filter.h │ │ │ │ └── well_known_names.h │ │ │ ├── listener/ │ │ │ │ ├── BUILD │ │ │ │ ├── http_inspector/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── http_inspector.cc │ │ │ │ │ └── http_inspector.h │ │ │ │ ├── original_dst/ 
│ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── original_dst.cc │ │ │ │ │ └── original_dst.h │ │ │ │ ├── original_src/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── original_src.cc │ │ │ │ │ ├── original_src.h │ │ │ │ │ ├── original_src_config_factory.cc │ │ │ │ │ └── original_src_config_factory.h │ │ │ │ ├── proxy_protocol/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── proxy_protocol.cc │ │ │ │ │ ├── proxy_protocol.h │ │ │ │ │ └── proxy_protocol_header.h │ │ │ │ ├── tls_inspector/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── tls_inspector.cc │ │ │ │ │ └── tls_inspector.h │ │ │ │ └── well_known_names.h │ │ │ ├── network/ │ │ │ │ ├── BUILD │ │ │ │ ├── client_ssl_auth/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── client_ssl_auth.cc │ │ │ │ │ ├── client_ssl_auth.h │ │ │ │ │ ├── config.cc │ │ │ │ │ └── config.h │ │ │ │ ├── common/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── factory_base.h │ │ │ │ │ ├── redis/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── client.h │ │ │ │ │ │ ├── client_impl.cc │ │ │ │ │ │ ├── client_impl.h │ │ │ │ │ │ ├── codec.h │ │ │ │ │ │ ├── codec_impl.cc │ │ │ │ │ │ ├── codec_impl.h │ │ │ │ │ │ ├── fault.h │ │ │ │ │ │ ├── fault_impl.cc │ │ │ │ │ │ ├── fault_impl.h │ │ │ │ │ │ ├── redis_command_stats.cc │ │ │ │ │ │ ├── redis_command_stats.h │ │ │ │ │ │ ├── supported_commands.h │ │ │ │ │ │ ├── utility.cc │ │ │ │ │ │ └── utility.h │ │ │ │ │ └── utility.h │ │ │ │ ├── direct_response/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── filter.cc │ │ │ │ │ └── filter.h │ │ │ │ ├── dubbo_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── active_message.cc │ │ │ │ │ ├── active_message.h │ │ │ │ │ ├── app_exception.cc │ │ │ │ │ ├── app_exception.h │ │ │ │ │ ├── buffer_helper.cc │ │ │ │ │ ├── buffer_helper.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── conn_manager.cc │ │ │ │ │ ├── conn_manager.h │ │ │ │ │ ├── decoder.cc │ │ │ │ │ ├── decoder.h │ │ │ │ │ ├── decoder_event_handler.h │ │ │ │ │ ├── 
dubbo_hessian2_serializer_impl.cc │ │ │ │ │ ├── dubbo_hessian2_serializer_impl.h │ │ │ │ │ ├── dubbo_protocol_impl.cc │ │ │ │ │ ├── dubbo_protocol_impl.h │ │ │ │ │ ├── filters/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── factory_base.h │ │ │ │ │ │ ├── filter.h │ │ │ │ │ │ ├── filter_config.h │ │ │ │ │ │ └── well_known_names.h │ │ │ │ │ ├── heartbeat_response.cc │ │ │ │ │ ├── heartbeat_response.h │ │ │ │ │ ├── hessian_utils.cc │ │ │ │ │ ├── hessian_utils.h │ │ │ │ │ ├── message.h │ │ │ │ │ ├── message_impl.h │ │ │ │ │ ├── metadata.h │ │ │ │ │ ├── protocol.h │ │ │ │ │ ├── protocol_constants.h │ │ │ │ │ ├── router/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── config.cc │ │ │ │ │ │ ├── config.h │ │ │ │ │ │ ├── route.h │ │ │ │ │ │ ├── route_matcher.cc │ │ │ │ │ │ ├── route_matcher.h │ │ │ │ │ │ ├── router.h │ │ │ │ │ │ ├── router_impl.cc │ │ │ │ │ │ └── router_impl.h │ │ │ │ │ ├── serializer.h │ │ │ │ │ ├── serializer_impl.cc │ │ │ │ │ ├── serializer_impl.h │ │ │ │ │ └── stats.h │ │ │ │ ├── echo/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── echo.cc │ │ │ │ │ └── echo.h │ │ │ │ ├── ext_authz/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── ext_authz.cc │ │ │ │ │ └── ext_authz.h │ │ │ │ ├── http_connection_manager/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ └── config.h │ │ │ │ ├── kafka/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── broker/ │ │ │ │ │ │ ├── config.cc │ │ │ │ │ │ ├── config.h │ │ │ │ │ │ ├── filter.cc │ │ │ │ │ │ └── filter.h │ │ │ │ │ ├── codec.h │ │ │ │ │ ├── kafka_request.h │ │ │ │ │ ├── kafka_request_parser.cc │ │ │ │ │ ├── kafka_request_parser.h │ │ │ │ │ ├── kafka_response.h │ │ │ │ │ ├── kafka_response_parser.cc │ │ │ │ │ ├── kafka_response_parser.h │ │ │ │ │ ├── kafka_types.h │ │ │ │ │ ├── parser.h │ │ │ │ │ ├── protocol/ │ │ │ │ │ │ ├── complex_type_template.j2 │ │ │ │ │ │ ├── generator.py │ │ │ │ │ │ ├── kafka_request_resolver_cc.j2 │ │ │ │ │ │ ├── kafka_response_resolver_cc.j2 │ │ │ │ │ │ ├── launcher.py │ │ │ │ │ 
│ ├── request_metrics_h.j2 │ │ │ │ │ │ ├── request_parser.j2 │ │ │ │ │ │ ├── requests_h.j2 │ │ │ │ │ │ ├── response_metrics_h.j2 │ │ │ │ │ │ ├── response_parser.j2 │ │ │ │ │ │ └── responses_h.j2 │ │ │ │ │ ├── request_codec.cc │ │ │ │ │ ├── request_codec.h │ │ │ │ │ ├── requirements.txt │ │ │ │ │ ├── response_codec.cc │ │ │ │ │ ├── response_codec.h │ │ │ │ │ ├── serialization/ │ │ │ │ │ │ ├── generator.py │ │ │ │ │ │ ├── launcher.py │ │ │ │ │ │ └── serialization_composite_h.j2 │ │ │ │ │ ├── serialization.cc │ │ │ │ │ ├── serialization.h │ │ │ │ │ └── tagged_fields.h │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── local_ratelimit.cc │ │ │ │ │ └── local_ratelimit.h │ │ │ │ ├── mongo_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── bson.h │ │ │ │ │ ├── bson_impl.cc │ │ │ │ │ ├── bson_impl.h │ │ │ │ │ ├── codec.h │ │ │ │ │ ├── codec_impl.cc │ │ │ │ │ ├── codec_impl.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── mongo_stats.cc │ │ │ │ │ ├── mongo_stats.h │ │ │ │ │ ├── proxy.cc │ │ │ │ │ ├── proxy.h │ │ │ │ │ ├── utility.cc │ │ │ │ │ └── utility.h │ │ │ │ ├── mysql_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── mysql_codec.h │ │ │ │ │ ├── mysql_codec_clogin.cc │ │ │ │ │ ├── mysql_codec_clogin.h │ │ │ │ │ ├── mysql_codec_clogin_resp.cc │ │ │ │ │ ├── mysql_codec_clogin_resp.h │ │ │ │ │ ├── mysql_codec_command.cc │ │ │ │ │ ├── mysql_codec_command.h │ │ │ │ │ ├── mysql_codec_greeting.cc │ │ │ │ │ ├── mysql_codec_greeting.h │ │ │ │ │ ├── mysql_codec_switch_resp.cc │ │ │ │ │ ├── mysql_codec_switch_resp.h │ │ │ │ │ ├── mysql_config.cc │ │ │ │ │ ├── mysql_config.h │ │ │ │ │ ├── mysql_decoder.cc │ │ │ │ │ ├── mysql_decoder.h │ │ │ │ │ ├── mysql_filter.cc │ │ │ │ │ ├── mysql_filter.h │ │ │ │ │ ├── mysql_session.h │ │ │ │ │ ├── mysql_utils.cc │ │ │ │ │ └── mysql_utils.h │ │ │ │ ├── postgres_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── postgres_decoder.cc │ │ │ │ │ ├── 
postgres_decoder.h │ │ │ │ │ ├── postgres_filter.cc │ │ │ │ │ ├── postgres_filter.h │ │ │ │ │ ├── postgres_message.cc │ │ │ │ │ ├── postgres_message.h │ │ │ │ │ └── postgres_session.h │ │ │ │ ├── ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── ratelimit.cc │ │ │ │ │ └── ratelimit.h │ │ │ │ ├── rbac/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── rbac_filter.cc │ │ │ │ │ └── rbac_filter.h │ │ │ │ ├── redis_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── command_splitter.h │ │ │ │ │ ├── command_splitter_impl.cc │ │ │ │ │ ├── command_splitter_impl.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── conn_pool.h │ │ │ │ │ ├── conn_pool_impl.cc │ │ │ │ │ ├── conn_pool_impl.h │ │ │ │ │ ├── proxy_filter.cc │ │ │ │ │ ├── proxy_filter.h │ │ │ │ │ ├── router.h │ │ │ │ │ ├── router_impl.cc │ │ │ │ │ └── router_impl.h │ │ │ │ ├── rocketmq_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── active_message.cc │ │ │ │ │ ├── active_message.h │ │ │ │ │ ├── codec.cc │ │ │ │ │ ├── codec.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── conn_manager.cc │ │ │ │ │ ├── conn_manager.h │ │ │ │ │ ├── metadata.h │ │ │ │ │ ├── protocol.cc │ │ │ │ │ ├── protocol.h │ │ │ │ │ ├── router/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── route_matcher.cc │ │ │ │ │ │ ├── route_matcher.h │ │ │ │ │ │ ├── router.h │ │ │ │ │ │ ├── router_impl.cc │ │ │ │ │ │ └── router_impl.h │ │ │ │ │ ├── stats.h │ │ │ │ │ ├── topic_route.cc │ │ │ │ │ ├── topic_route.h │ │ │ │ │ └── well_known_names.h │ │ │ │ ├── sni_cluster/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── sni_cluster.cc │ │ │ │ │ └── sni_cluster.h │ │ │ │ ├── sni_dynamic_forward_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── proxy_filter.cc │ │ │ │ │ └── proxy_filter.h │ │ │ │ ├── tcp_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ └── config.h │ │ │ │ ├── thrift_proxy/ │ │ │ │ │ ├── BUILD │ │ │ 
│ │ ├── app_exception_impl.cc │ │ │ │ │ ├── app_exception_impl.h │ │ │ │ │ ├── auto_protocol_impl.cc │ │ │ │ │ ├── auto_protocol_impl.h │ │ │ │ │ ├── auto_transport_impl.cc │ │ │ │ │ ├── auto_transport_impl.h │ │ │ │ │ ├── binary_protocol_impl.cc │ │ │ │ │ ├── binary_protocol_impl.h │ │ │ │ │ ├── buffer_helper.cc │ │ │ │ │ ├── buffer_helper.h │ │ │ │ │ ├── compact_protocol_impl.cc │ │ │ │ │ ├── compact_protocol_impl.h │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── conn_manager.cc │ │ │ │ │ ├── conn_manager.h │ │ │ │ │ ├── conn_state.h │ │ │ │ │ ├── decoder.cc │ │ │ │ │ ├── decoder.h │ │ │ │ │ ├── decoder_events.h │ │ │ │ │ ├── docs/ │ │ │ │ │ │ ├── thrift_state_machine.dot │ │ │ │ │ │ └── thrift_state_machine.md │ │ │ │ │ ├── filters/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── factory_base.h │ │ │ │ │ │ ├── filter.h │ │ │ │ │ │ ├── filter_config.h │ │ │ │ │ │ ├── pass_through_filter.h │ │ │ │ │ │ ├── ratelimit/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ ├── config.cc │ │ │ │ │ │ │ ├── config.h │ │ │ │ │ │ │ ├── ratelimit.cc │ │ │ │ │ │ │ └── ratelimit.h │ │ │ │ │ │ └── well_known_names.h │ │ │ │ │ ├── framed_transport_impl.cc │ │ │ │ │ ├── framed_transport_impl.h │ │ │ │ │ ├── header_transport_impl.cc │ │ │ │ │ ├── header_transport_impl.h │ │ │ │ │ ├── metadata.h │ │ │ │ │ ├── protocol.h │ │ │ │ │ ├── protocol_converter.h │ │ │ │ │ ├── router/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── config.cc │ │ │ │ │ │ ├── config.h │ │ │ │ │ │ ├── router.h │ │ │ │ │ │ ├── router_impl.cc │ │ │ │ │ │ ├── router_impl.h │ │ │ │ │ │ ├── router_ratelimit.h │ │ │ │ │ │ ├── router_ratelimit_impl.cc │ │ │ │ │ │ └── router_ratelimit_impl.h │ │ │ │ │ ├── stats.h │ │ │ │ │ ├── thrift.h │ │ │ │ │ ├── thrift_object.h │ │ │ │ │ ├── thrift_object_impl.cc │ │ │ │ │ ├── thrift_object_impl.h │ │ │ │ │ ├── tracing.h │ │ │ │ │ ├── transport.h │ │ │ │ │ ├── twitter_protocol_impl.cc │ │ │ │ │ ├── twitter_protocol_impl.h │ │ │ │ │ ├── unframed_transport_impl.cc │ │ │ │ │ └── unframed_transport_impl.h 
│ │ │ │ ├── wasm/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── wasm_filter.cc │ │ │ │ │ └── wasm_filter.h │ │ │ │ ├── well_known_names.h │ │ │ │ └── zookeeper_proxy/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── decoder.cc │ │ │ │ ├── decoder.h │ │ │ │ ├── filter.cc │ │ │ │ ├── filter.h │ │ │ │ ├── utils.cc │ │ │ │ └── utils.h │ │ │ └── udp/ │ │ │ ├── dns_filter/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── dns_filter.cc │ │ │ │ ├── dns_filter.h │ │ │ │ ├── dns_filter_constants.h │ │ │ │ ├── dns_filter_resolver.cc │ │ │ │ ├── dns_filter_resolver.h │ │ │ │ ├── dns_filter_utils.cc │ │ │ │ ├── dns_filter_utils.h │ │ │ │ ├── dns_parser.cc │ │ │ │ └── dns_parser.h │ │ │ └── udp_proxy/ │ │ │ ├── BUILD │ │ │ ├── config.cc │ │ │ ├── config.h │ │ │ ├── hash_policy_impl.cc │ │ │ ├── hash_policy_impl.h │ │ │ ├── udp_proxy_filter.cc │ │ │ └── udp_proxy_filter.h │ │ ├── grpc_credentials/ │ │ │ ├── BUILD │ │ │ ├── aws_iam/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ └── config.h │ │ │ ├── example/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ └── config.h │ │ │ ├── file_based_metadata/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ └── config.h │ │ │ └── well_known_names.h │ │ ├── health_checkers/ │ │ │ ├── BUILD │ │ │ ├── redis/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── redis.cc │ │ │ │ ├── redis.h │ │ │ │ └── utility.h │ │ │ └── well_known_names.h │ │ ├── internal_redirect/ │ │ │ ├── BUILD │ │ │ ├── allow_listed_routes/ │ │ │ │ ├── BUILD │ │ │ │ ├── allow_listed_routes.h │ │ │ │ ├── config.cc │ │ │ │ └── config.h │ │ │ ├── previous_routes/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── previous_routes.cc │ │ │ │ └── previous_routes.h │ │ │ ├── safe_cross_scheme/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ └── safe_cross_scheme.h │ │ │ └── well_known_names.h │ │ ├── quic_listeners/ │ │ │ └── quiche/ │ 
│ │ ├── BUILD │ │ │ ├── active_quic_listener.cc │ │ │ ├── active_quic_listener.h │ │ │ ├── active_quic_listener_config.cc │ │ │ ├── active_quic_listener_config.h │ │ │ ├── codec_impl.cc │ │ │ ├── codec_impl.h │ │ │ ├── envoy_quic_alarm.cc │ │ │ ├── envoy_quic_alarm.h │ │ │ ├── envoy_quic_alarm_factory.cc │ │ │ ├── envoy_quic_alarm_factory.h │ │ │ ├── envoy_quic_client_connection.cc │ │ │ ├── envoy_quic_client_connection.h │ │ │ ├── envoy_quic_client_session.cc │ │ │ ├── envoy_quic_client_session.h │ │ │ ├── envoy_quic_client_stream.cc │ │ │ ├── envoy_quic_client_stream.h │ │ │ ├── envoy_quic_connection.cc │ │ │ ├── envoy_quic_connection.h │ │ │ ├── envoy_quic_connection_helper.h │ │ │ ├── envoy_quic_dispatcher.cc │ │ │ ├── envoy_quic_dispatcher.h │ │ │ ├── envoy_quic_packet_writer.cc │ │ │ ├── envoy_quic_packet_writer.h │ │ │ ├── envoy_quic_proof_source.cc │ │ │ ├── envoy_quic_proof_source.h │ │ │ ├── envoy_quic_proof_source_base.cc │ │ │ ├── envoy_quic_proof_source_base.h │ │ │ ├── envoy_quic_proof_verifier.cc │ │ │ ├── envoy_quic_proof_verifier.h │ │ │ ├── envoy_quic_proof_verifier_base.cc │ │ │ ├── envoy_quic_proof_verifier_base.h │ │ │ ├── envoy_quic_server_connection.cc │ │ │ ├── envoy_quic_server_connection.h │ │ │ ├── envoy_quic_server_session.cc │ │ │ ├── envoy_quic_server_session.h │ │ │ ├── envoy_quic_server_stream.cc │ │ │ ├── envoy_quic_server_stream.h │ │ │ ├── envoy_quic_simulated_watermark_buffer.h │ │ │ ├── envoy_quic_stream.h │ │ │ ├── envoy_quic_utils.cc │ │ │ ├── envoy_quic_utils.h │ │ │ ├── platform/ │ │ │ │ ├── BUILD │ │ │ │ ├── envoy_quic_clock.cc │ │ │ │ ├── envoy_quic_clock.h │ │ │ │ ├── flags_impl.cc │ │ │ │ ├── flags_impl.h │ │ │ │ ├── flags_list.h │ │ │ │ ├── http2_arraysize_impl.h │ │ │ │ ├── http2_bug_tracker_impl.h │ │ │ │ ├── http2_containers_impl.h │ │ │ │ ├── http2_estimate_memory_usage_impl.h │ │ │ │ ├── http2_flag_utils_impl.h │ │ │ │ ├── http2_flags_impl.h │ │ │ │ ├── http2_logging_impl.h │ │ │ │ ├── http2_macros_impl.h │ │ │ │ 
├── http2_string_piece_impl.h │ │ │ │ ├── http2_string_utils_impl.h │ │ │ │ ├── quic_aligned_impl.h │ │ │ │ ├── quic_bug_tracker_impl.h │ │ │ │ ├── quic_cert_utils_impl.cc │ │ │ │ ├── quic_cert_utils_impl.h │ │ │ │ ├── quic_client_stats_impl.h │ │ │ │ ├── quic_containers_impl.h │ │ │ │ ├── quic_error_code_wrappers_impl.h │ │ │ │ ├── quic_estimate_memory_usage_impl.h │ │ │ │ ├── quic_export_impl.h │ │ │ │ ├── quic_fallthrough_impl.h │ │ │ │ ├── quic_file_utils_impl.cc │ │ │ │ ├── quic_file_utils_impl.h │ │ │ │ ├── quic_flag_utils_impl.h │ │ │ │ ├── quic_flags_impl.h │ │ │ │ ├── quic_hostname_utils_impl.cc │ │ │ │ ├── quic_hostname_utils_impl.h │ │ │ │ ├── quic_iovec_impl.h │ │ │ │ ├── quic_logging_impl.cc │ │ │ │ ├── quic_logging_impl.h │ │ │ │ ├── quic_macros_impl.h │ │ │ │ ├── quic_map_util_impl.h │ │ │ │ ├── quic_mem_slice_impl.cc │ │ │ │ ├── quic_mem_slice_impl.h │ │ │ │ ├── quic_mem_slice_span_impl.cc │ │ │ │ ├── quic_mem_slice_span_impl.h │ │ │ │ ├── quic_mem_slice_storage_impl.cc │ │ │ │ ├── quic_mem_slice_storage_impl.h │ │ │ │ ├── quic_mutex_impl.h │ │ │ │ ├── quic_pcc_sender_impl.h │ │ │ │ ├── quic_prefetch_impl.h │ │ │ │ ├── quic_ptr_util_impl.h │ │ │ │ ├── quic_reference_counted_impl.h │ │ │ │ ├── quic_server_stats_impl.h │ │ │ │ ├── quic_stack_trace_impl.h │ │ │ │ ├── quic_stream_buffer_allocator_impl.h │ │ │ │ ├── quic_string_utils_impl.h │ │ │ │ ├── quic_udp_socket_platform_impl.h │ │ │ │ ├── quic_uint128_impl.h │ │ │ │ ├── quiche_arraysize_impl.h │ │ │ │ ├── quiche_endian_impl.h │ │ │ │ ├── quiche_export_impl.h │ │ │ │ ├── quiche_logging_impl.h │ │ │ │ ├── quiche_map_util_impl.h │ │ │ │ ├── quiche_optional_impl.h │ │ │ │ ├── quiche_ptr_util_impl.h │ │ │ │ ├── quiche_str_cat_impl.h │ │ │ │ ├── quiche_string_piece_impl.h │ │ │ │ ├── quiche_text_utils_impl.h │ │ │ │ ├── quiche_time_utils_impl.cc │ │ │ │ ├── quiche_time_utils_impl.h │ │ │ │ ├── quiche_unordered_containers_impl.h │ │ │ │ ├── spdy_arraysize_impl.h │ │ │ │ ├── spdy_bug_tracker_impl.h │ │ │ 
│ ├── spdy_containers_impl.h │ │ │ │ ├── spdy_endianness_util_impl.h │ │ │ │ ├── spdy_estimate_memory_usage_impl.h │ │ │ │ ├── spdy_flags_impl.h │ │ │ │ ├── spdy_logging_impl.h │ │ │ │ ├── spdy_macros_impl.h │ │ │ │ ├── spdy_mem_slice_impl.h │ │ │ │ ├── spdy_string_utils_impl.h │ │ │ │ ├── spdy_test_helpers_impl.h │ │ │ │ ├── spdy_test_utils_prod_impl.h │ │ │ │ ├── spdy_unsafe_arena_impl.h │ │ │ │ ├── string_utils.cc │ │ │ │ └── string_utils.h │ │ │ ├── quic_filter_manager_connection_impl.cc │ │ │ ├── quic_filter_manager_connection_impl.h │ │ │ ├── quic_io_handle_wrapper.h │ │ │ ├── quic_transport_socket_factory.cc │ │ │ ├── quic_transport_socket_factory.h │ │ │ ├── spdy_server_push_utils_for_envoy.cc │ │ │ ├── udp_gso_batch_writer.cc │ │ │ ├── udp_gso_batch_writer.h │ │ │ ├── udp_gso_batch_writer_config.cc │ │ │ └── udp_gso_batch_writer_config.h │ │ ├── resource_monitors/ │ │ │ ├── BUILD │ │ │ ├── common/ │ │ │ │ ├── BUILD │ │ │ │ └── factory_base.h │ │ │ ├── fixed_heap/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── fixed_heap_monitor.cc │ │ │ │ └── fixed_heap_monitor.h │ │ │ ├── injected_resource/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── injected_resource_monitor.cc │ │ │ │ └── injected_resource_monitor.h │ │ │ └── well_known_names.h │ │ ├── retry/ │ │ │ ├── host/ │ │ │ │ ├── omit_canary_hosts/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ └── omit_canary_hosts.h │ │ │ │ ├── omit_host_metadata/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.cc │ │ │ │ │ ├── config.h │ │ │ │ │ ├── omit_host_metadata.cc │ │ │ │ │ └── omit_host_metadata.h │ │ │ │ └── previous_hosts/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ └── previous_hosts.h │ │ │ └── priority/ │ │ │ ├── BUILD │ │ │ ├── previous_priorities/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── previous_priorities.cc │ │ │ │ └── previous_priorities.h │ │ │ └── well_known_names.h │ │ ├── 
stat_sinks/ │ │ │ ├── BUILD │ │ │ ├── common/ │ │ │ │ └── statsd/ │ │ │ │ ├── BUILD │ │ │ │ ├── statsd.cc │ │ │ │ └── statsd.h │ │ │ ├── dog_statsd/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ └── config.h │ │ │ ├── hystrix/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── hystrix.cc │ │ │ │ └── hystrix.h │ │ │ ├── metrics_service/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── grpc_metrics_proto_descriptors.cc │ │ │ │ ├── grpc_metrics_proto_descriptors.h │ │ │ │ ├── grpc_metrics_service_impl.cc │ │ │ │ └── grpc_metrics_service_impl.h │ │ │ ├── statsd/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ └── config.h │ │ │ ├── wasm/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ └── wasm_stat_sink_impl.h │ │ │ └── well_known_names.h │ │ ├── tracers/ │ │ │ ├── common/ │ │ │ │ ├── BUILD │ │ │ │ ├── factory_base.h │ │ │ │ └── ot/ │ │ │ │ ├── BUILD │ │ │ │ ├── opentracing_driver_impl.cc │ │ │ │ └── opentracing_driver_impl.h │ │ │ ├── datadog/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── datadog_tracer_impl.cc │ │ │ │ └── datadog_tracer_impl.h │ │ │ ├── dynamic_ot/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── dynamic_opentracing_driver_impl.cc │ │ │ │ └── dynamic_opentracing_driver_impl.h │ │ │ ├── lightstep/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── lightstep_tracer_impl.cc │ │ │ │ └── lightstep_tracer_impl.h │ │ │ ├── opencensus/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── opencensus_tracer_impl.cc │ │ │ │ └── opencensus_tracer_impl.h │ │ │ ├── xray/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── daemon.proto │ │ │ │ ├── daemon_broker.cc │ │ │ │ ├── daemon_broker.h │ │ │ │ ├── localized_sampling.cc │ │ │ │ ├── localized_sampling.h │ │ │ │ ├── reservoir.h │ │ │ │ ├── sampling_strategy.h │ │ │ │ ├── tracer.cc │ │ │ │ ├── tracer.h │ │ │ │ ├── util.cc │ │ │ │ ├── 
util.h │ │ │ │ ├── xray_configuration.h │ │ │ │ ├── xray_tracer_impl.cc │ │ │ │ └── xray_tracer_impl.h │ │ │ └── zipkin/ │ │ │ ├── BUILD │ │ │ ├── config.cc │ │ │ ├── config.h │ │ │ ├── span_buffer.cc │ │ │ ├── span_buffer.h │ │ │ ├── span_context.cc │ │ │ ├── span_context.h │ │ │ ├── span_context_extractor.cc │ │ │ ├── span_context_extractor.h │ │ │ ├── tracer.cc │ │ │ ├── tracer.h │ │ │ ├── tracer_interface.h │ │ │ ├── util.cc │ │ │ ├── util.h │ │ │ ├── zipkin_core_constants.h │ │ │ ├── zipkin_core_types.cc │ │ │ ├── zipkin_core_types.h │ │ │ ├── zipkin_json_field_names.h │ │ │ ├── zipkin_tracer_impl.cc │ │ │ └── zipkin_tracer_impl.h │ │ ├── transport_sockets/ │ │ │ ├── BUILD │ │ │ ├── alts/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── grpc_tsi.h │ │ │ │ ├── noop_transport_socket_callbacks.h │ │ │ │ ├── tsi_frame_protector.cc │ │ │ │ ├── tsi_frame_protector.h │ │ │ │ ├── tsi_handshaker.cc │ │ │ │ ├── tsi_handshaker.h │ │ │ │ ├── tsi_socket.cc │ │ │ │ └── tsi_socket.h │ │ │ ├── common/ │ │ │ │ ├── BUILD │ │ │ │ ├── passthrough.cc │ │ │ │ └── passthrough.h │ │ │ ├── proxy_protocol/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── proxy_protocol.cc │ │ │ │ └── proxy_protocol.h │ │ │ ├── raw_buffer/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ └── config.h │ │ │ ├── tap/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── tap.cc │ │ │ │ ├── tap.h │ │ │ │ ├── tap_config.h │ │ │ │ ├── tap_config_impl.cc │ │ │ │ └── tap_config_impl.h │ │ │ ├── tls/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── context_config_impl.cc │ │ │ │ ├── context_config_impl.h │ │ │ │ ├── context_impl.cc │ │ │ │ ├── context_impl.h │ │ │ │ ├── context_manager_impl.cc │ │ │ │ ├── context_manager_impl.h │ │ │ │ ├── io_handle_bio.cc │ │ │ │ ├── io_handle_bio.h │ │ │ │ ├── ocsp/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── asn1_utility.cc │ │ │ │ │ ├── asn1_utility.h │ │ │ │ │ ├── ocsp.cc │ │ │ │ │ └── ocsp.h │ 
│ │ │ ├── private_key/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── private_key_manager_impl.cc │ │ │ │ │ └── private_key_manager_impl.h │ │ │ │ ├── ssl_handshaker.cc │ │ │ │ ├── ssl_handshaker.h │ │ │ │ ├── ssl_socket.cc │ │ │ │ ├── ssl_socket.h │ │ │ │ ├── utility.cc │ │ │ │ └── utility.h │ │ │ └── well_known_names.h │ │ ├── upstreams/ │ │ │ └── http/ │ │ │ ├── generic/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ └── config.h │ │ │ ├── http/ │ │ │ │ ├── BUILD │ │ │ │ ├── config.cc │ │ │ │ ├── config.h │ │ │ │ ├── upstream_request.cc │ │ │ │ └── upstream_request.h │ │ │ └── tcp/ │ │ │ ├── BUILD │ │ │ ├── config.cc │ │ │ ├── config.h │ │ │ ├── upstream_request.cc │ │ │ └── upstream_request.h │ │ └── watchdog/ │ │ ├── abort_action/ │ │ │ ├── BUILD │ │ │ ├── abort_action.cc │ │ │ ├── abort_action.h │ │ │ ├── config.cc │ │ │ └── config.h │ │ └── profile_action/ │ │ ├── BUILD │ │ ├── config.cc │ │ ├── config.h │ │ ├── profile_action.cc │ │ └── profile_action.h │ └── server/ │ ├── BUILD │ ├── active_raw_udp_listener_config.cc │ ├── active_raw_udp_listener_config.h │ ├── admin/ │ │ ├── BUILD │ │ ├── admin.cc │ │ ├── admin.h │ │ ├── admin_filter.cc │ │ ├── admin_filter.h │ │ ├── clusters_handler.cc │ │ ├── clusters_handler.h │ │ ├── config_dump_handler.cc │ │ ├── config_dump_handler.h │ │ ├── config_tracker_impl.cc │ │ ├── config_tracker_impl.h │ │ ├── handler_ctx.h │ │ ├── init_dump_handler.cc │ │ ├── init_dump_handler.h │ │ ├── listeners_handler.cc │ │ ├── listeners_handler.h │ │ ├── logs_handler.cc │ │ ├── logs_handler.h │ │ ├── profiling_handler.cc │ │ ├── profiling_handler.h │ │ ├── prometheus_stats.cc │ │ ├── prometheus_stats.h │ │ ├── runtime_handler.cc │ │ ├── runtime_handler.h │ │ ├── server_cmd_handler.cc │ │ ├── server_cmd_handler.h │ │ ├── server_info_handler.cc │ │ ├── server_info_handler.h │ │ ├── stats_handler.cc │ │ ├── stats_handler.h │ │ ├── utils.cc │ │ └── utils.h │ ├── api_listener_impl.cc │ ├── api_listener_impl.h │ ├── backtrace.cc │ ├── backtrace.h │ ├── 
config_validation/ │ │ ├── BUILD │ │ ├── admin.cc │ │ ├── admin.h │ │ ├── api.cc │ │ ├── api.h │ │ ├── async_client.cc │ │ ├── async_client.h │ │ ├── cluster_manager.cc │ │ ├── cluster_manager.h │ │ ├── connection.h │ │ ├── dispatcher.cc │ │ ├── dispatcher.h │ │ ├── dns.cc │ │ ├── dns.h │ │ ├── server.cc │ │ └── server.h │ ├── configuration_impl.cc │ ├── configuration_impl.h │ ├── connection_handler_impl.cc │ ├── connection_handler_impl.h │ ├── drain_manager_impl.cc │ ├── drain_manager_impl.h │ ├── filter_chain_factory_context_callback.h │ ├── filter_chain_manager_impl.cc │ ├── filter_chain_manager_impl.h │ ├── guarddog_impl.cc │ ├── guarddog_impl.h │ ├── hot_restart.proto │ ├── hot_restart_impl.cc │ ├── hot_restart_impl.h │ ├── hot_restart_nop_impl.h │ ├── hot_restarting_base.cc │ ├── hot_restarting_base.h │ ├── hot_restarting_child.cc │ ├── hot_restarting_child.h │ ├── hot_restarting_parent.cc │ ├── hot_restarting_parent.h │ ├── lds_api.cc │ ├── lds_api.h │ ├── listener_hooks.h │ ├── listener_impl.cc │ ├── listener_impl.h │ ├── listener_manager_impl.cc │ ├── listener_manager_impl.h │ ├── options_impl.cc │ ├── options_impl.h │ ├── options_impl_platform.h │ ├── options_impl_platform_default.cc │ ├── options_impl_platform_linux.cc │ ├── options_impl_platform_linux.h │ ├── overload_manager_impl.cc │ ├── overload_manager_impl.h │ ├── process_context_impl.h │ ├── proto_descriptors.cc │ ├── proto_descriptors.h │ ├── resource_monitor_config_impl.h │ ├── server.cc │ ├── server.h │ ├── ssl_context_manager.cc │ ├── ssl_context_manager.h │ ├── transport_socket_config_impl.h │ ├── watchdog_impl.cc │ ├── watchdog_impl.h │ ├── well_known_names.h │ ├── worker_impl.cc │ └── worker_impl.h ├── support/ │ ├── README.md │ ├── bootstrap │ └── hooks/ │ ├── pre-push │ └── prepare-commit-msg ├── test/ │ ├── BUILD │ ├── README.md │ ├── benchmark/ │ │ ├── BUILD │ │ ├── main.cc │ │ └── main.h │ ├── common/ │ │ ├── access_log/ │ │ │ ├── BUILD │ │ │ ├── access_log_impl_test.cc │ │ │ └── 
access_log_manager_impl_test.cc │ │ ├── buffer/ │ │ │ ├── BUILD │ │ │ ├── buffer_corpus/ │ │ │ │ ├── basic │ │ │ │ ├── case │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5080353465696256 │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5644734729551872 │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5654939127250944 │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5664992304562176 │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5668091688648704 │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5669274699431936 │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5716850116132864 │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5744501719564288 │ │ │ │ ├── clusterfuzz-testcase-minimized-buffer_fuzz_test-5760708737761280 │ │ │ │ ├── clusterfuzz-testcase-minimized-new_buffer_fuzz_test-5714377684025344 │ │ │ │ ├── crash-d60939b6186fa6186e0b574ac67aa6df8f1081cd │ │ │ │ └── crash-ed103900aec1285149aafc05102a541d9ec51363 │ │ │ ├── buffer_fuzz.cc │ │ │ ├── buffer_fuzz.h │ │ │ ├── buffer_fuzz.proto │ │ │ ├── buffer_fuzz_test.cc │ │ │ ├── buffer_speed_test.cc │ │ │ ├── buffer_test.cc │ │ │ ├── owned_impl_test.cc │ │ │ ├── utility.h │ │ │ ├── watermark_buffer_test.cc │ │ │ └── zero_copy_input_stream_test.cc │ │ ├── common/ │ │ │ ├── BUILD │ │ │ ├── assert_test.cc │ │ │ ├── backoff_strategy_test.cc │ │ │ ├── base64_corpus/ │ │ │ │ └── singleton │ │ │ ├── base64_fuzz_test.cc │ │ │ ├── base64_test.cc │ │ │ ├── basic_resource_impl_test.cc │ │ │ ├── callback_impl_test.cc │ │ │ ├── cleanup_test.cc │ │ │ ├── fmt_test.cc │ │ │ ├── hash_corpus/ │ │ │ │ └── example │ │ │ ├── hash_fuzz_test.cc │ │ │ ├── hash_test.cc │ │ │ ├── hex_test.cc │ │ │ ├── linked_object_test.cc │ │ │ ├── lock_guard_test.cc │ │ │ ├── log_macros_test.cc │ │ │ ├── logger_corpus/ │ │ │ │ └── test │ │ │ ├── logger_fuzz_test.cc │ │ │ ├── logger_speed_test.cc │ │ │ ├── logger_test.cc │ │ │ ├── matchers_test.cc │ │ │ ├── 
mem_block_builder_test.cc │ │ │ ├── mutex_tracer_test.cc │ │ │ ├── perf_annotation_disabled_test.cc │ │ │ ├── perf_annotation_test.cc │ │ │ ├── phantom_test.cc │ │ │ ├── random_generator_test.cc │ │ │ ├── regex_test.cc │ │ │ ├── statusor_test.cc │ │ │ ├── stl_helpers_test.cc │ │ │ ├── thread_id_test.cc │ │ │ ├── thread_test.cc │ │ │ ├── token_bucket_impl_test.cc │ │ │ ├── utility_corpus/ │ │ │ │ └── test │ │ │ ├── utility_fuzz_test.cc │ │ │ ├── utility_speed_test.cc │ │ │ ├── utility_test.cc │ │ │ └── version_test.cc │ │ ├── config/ │ │ │ ├── BUILD │ │ │ ├── api_shadow_test.cc │ │ │ ├── api_type_oracle_test.cc │ │ │ ├── config_provider_impl_test.cc │ │ │ ├── datasource_test.cc │ │ │ ├── decoded_resource_impl_test.cc │ │ │ ├── delta_subscription_impl_test.cc │ │ │ ├── delta_subscription_state_test.cc │ │ │ ├── delta_subscription_test_harness.h │ │ │ ├── dummy_config.proto │ │ │ ├── filesystem_subscription_impl_test.cc │ │ │ ├── filesystem_subscription_test_harness.h │ │ │ ├── grpc_mux_impl_test.cc │ │ │ ├── grpc_stream_test.cc │ │ │ ├── grpc_subscription_impl_test.cc │ │ │ ├── grpc_subscription_test_harness.h │ │ │ ├── http_subscription_impl_test.cc │ │ │ ├── http_subscription_test_harness.h │ │ │ ├── metadata_test.cc │ │ │ ├── new_grpc_mux_impl_test.cc │ │ │ ├── opaque_resource_decoder_impl_test.cc │ │ │ ├── pausable_ack_queue_test.cc │ │ │ ├── registry_test.cc │ │ │ ├── runtime_utility_test.cc │ │ │ ├── subscription_factory_impl_test.cc │ │ │ ├── subscription_impl_test.cc │ │ │ ├── subscription_test_harness.h │ │ │ ├── type_to_endpoint_test.cc │ │ │ ├── udpa_context_params_test.cc │ │ │ ├── udpa_resource_test.cc │ │ │ ├── udpa_test_utility.h │ │ │ ├── utility_test.cc │ │ │ ├── version_converter.proto │ │ │ ├── version_converter_test.cc │ │ │ └── watch_map_test.cc │ │ ├── conn_pool/ │ │ │ ├── BUILD │ │ │ └── conn_pool_base_test.cc │ │ ├── crypto/ │ │ │ ├── BUILD │ │ │ ├── get_sha_256_digest_corpus/ │ │ │ │ ├── 35d26780ea66d4ffb726bbafaa9302687bda7624 │ │ │ │ ├── 
58030c65410d7553b1804eb7ed64bdff1188f145 │ │ │ │ ├── 9c8bd40d34a88522d71d184c462af82e3148c02d │ │ │ │ └── e7af10a10f2540b1d1d497df2926786640285b1c │ │ │ ├── get_sha_256_digest_fuzz_test.cc │ │ │ ├── utility_test.cc │ │ │ ├── verify_signature_corpus/ │ │ │ │ ├── test_contains_sha1_wrong │ │ │ │ └── test_contains_sha256_correct │ │ │ ├── verify_signature_fuzz.proto │ │ │ ├── verify_signature_fuzz_test.cc │ │ │ └── verify_signature_fuzz_test.dict │ │ ├── event/ │ │ │ ├── BUILD │ │ │ ├── dispatcher_impl_test.cc │ │ │ ├── file_event_impl_test.cc │ │ │ └── scaled_range_timer_manager_test.cc │ │ ├── filesystem/ │ │ │ ├── BUILD │ │ │ ├── directory_test.cc │ │ │ ├── filesystem_impl_test.cc │ │ │ └── watcher_impl_test.cc │ │ ├── filter/ │ │ │ └── http/ │ │ │ ├── BUILD │ │ │ └── filter_config_discovery_impl_test.cc │ │ ├── formatter/ │ │ │ ├── BUILD │ │ │ ├── substitution_format_string_test.cc │ │ │ ├── substitution_formatter_corpus/ │ │ │ │ ├── clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-4673648219652096 │ │ │ │ ├── clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5630958620901376 │ │ │ │ ├── clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5633770020929536 │ │ │ │ ├── clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5645869313687552 │ │ │ │ ├── clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5701824317751296 │ │ │ │ ├── clusterfuzz-testcase-minimized-access_log_formatter_fuzz_test-5758486359572480 │ │ │ │ ├── clusterfuzz-testcase-minimized-header_parser_fuzz_test-5633924724424704.fuzz │ │ │ │ ├── dynamic_metadata │ │ │ │ ├── empty │ │ │ │ ├── headers │ │ │ │ ├── invalid_0 │ │ │ │ ├── invalid_1 │ │ │ │ ├── invalid_10 │ │ │ │ ├── invalid_11 │ │ │ │ ├── invalid_12 │ │ │ │ ├── invalid_13 │ │ │ │ ├── invalid_14 │ │ │ │ ├── invalid_15 │ │ │ │ ├── invalid_16 │ │ │ │ ├── invalid_17 │ │ │ │ ├── invalid_18 │ │ │ │ ├── invalid_19 │ │ │ │ ├── invalid_2 │ │ │ │ ├── invalid_3 │ │ │ │ ├── invalid_4 │ │ │ │ ├── 
invalid_5 │ │ │ │ ├── invalid_6 │ │ │ │ ├── invalid_7 │ │ │ │ ├── invalid_8 │ │ │ │ ├── invalid_9 │ │ │ │ ├── plain_string │ │ │ │ ├── response_code │ │ │ │ ├── start_time_0 │ │ │ │ ├── start_time_1 │ │ │ │ ├── start_time_2 │ │ │ │ ├── start_time_3 │ │ │ │ └── upstream_local_address │ │ │ ├── substitution_formatter_fuzz.proto │ │ │ ├── substitution_formatter_fuzz_test.cc │ │ │ ├── substitution_formatter_fuzz_test.dict │ │ │ ├── substitution_formatter_speed_test.cc │ │ │ └── substitution_formatter_test.cc │ │ ├── grpc/ │ │ │ ├── BUILD │ │ │ ├── async_client_impl_test.cc │ │ │ ├── async_client_manager_impl_test.cc │ │ │ ├── codec_corpus/ │ │ │ │ └── empty │ │ │ ├── codec_fuzz_test.cc │ │ │ ├── codec_test.cc │ │ │ ├── common_test.cc │ │ │ ├── context_impl_test.cc │ │ │ ├── google_async_client_impl_test.cc │ │ │ ├── google_grpc_creds_test.cc │ │ │ ├── google_grpc_utils_test.cc │ │ │ ├── grpc_client_integration.h │ │ │ ├── grpc_client_integration_test.cc │ │ │ ├── grpc_client_integration_test_harness.h │ │ │ ├── service_key.json │ │ │ └── utility.h │ │ ├── html/ │ │ │ ├── BUILD │ │ │ └── utility_test.cc │ │ ├── http/ │ │ │ ├── BUILD │ │ │ ├── async_client_impl_test.cc │ │ │ ├── async_client_utility_test.cc │ │ │ ├── codec_client_test.cc │ │ │ ├── codec_impl_corpus/ │ │ │ │ ├── 100-continue │ │ │ │ ├── absolute_url_bad │ │ │ │ ├── absolute_url_disallow │ │ │ │ ├── absolute_url_ok │ │ │ │ ├── chunked │ │ │ │ ├── clusterfuzz-testcase-codec_impl_fuzz_test-5687788200001536 │ │ │ │ ├── clusterfuzz-testcase-codec_impl_fuzz_test-5692024096817152 │ │ │ │ ├── clusterfuzz-testcase-codec_impl_fuzz_test-5723814130876416 │ │ │ │ ├── clusterfuzz-testcase-codec_impl_fuzz_test-5750359880892416 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5102523695497216 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5107763548520448 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5629973466710016 │ │ │ │ ├── 
clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5635096546639872 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5635865126895616 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5650111579815936 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5657409819770880 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5658640424370176 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5693519941861376 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5698895985508352 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5699757025263616 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5720162173452288 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5722972495544320 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5726642969772032 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5728207897624576 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5731902089592832 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-5748356020699136 │ │ │ │ ├── clusterfuzz-testcase-minimized-codec_impl_fuzz_test-6299606751641600 │ │ │ │ ├── connect │ │ │ │ ├── empty │ │ │ │ ├── example │ │ │ │ ├── goaway │ │ │ │ ├── h1_dispatch_after_reset │ │ │ │ ├── head │ │ │ │ ├── http_10 │ │ │ │ ├── metadata │ │ │ │ ├── metadata_corrupt │ │ │ │ ├── metadata_dispatch │ │ │ │ ├── method_connect │ │ │ │ ├── multi_stream │ │ │ │ ├── protocol_exception │ │ │ │ ├── read_disable │ │ │ │ ├── reset_stream │ │ │ │ ├── response_204_A │ │ │ │ ├── response_204_B │ │ │ │ ├── simple_stream │ │ │ │ ├── swap_buffer │ │ │ │ └── upgrade │ │ │ ├── codec_impl_fuzz.proto │ │ │ ├── codec_impl_fuzz_test.cc │ │ │ ├── codec_wrappers_test.cc │ │ │ ├── codes_speed_test.cc │ │ │ ├── codes_test.cc │ │ │ ├── common.cc │ │ │ ├── common.h │ │ │ ├── conn_manager_impl_common.h │ │ │ ├── conn_manager_impl_corpus/ │ │ │ │ ├── 
clusterfuzz-testcase-continueandendstream-endstream │ │ │ │ ├── clusterfuzz-testcase-failed-dispatch │ │ │ │ ├── clusterfuzz-testcase-invalidhost │ │ │ │ ├── clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5638706466652160 │ │ │ │ ├── clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5669833168912384 │ │ │ │ ├── clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5674283828772864 │ │ │ │ ├── clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5679723404328960 │ │ │ │ ├── clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5687458439102464 │ │ │ │ ├── clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5701624673861632 │ │ │ │ ├── clusterfuzz-testcase-minimized-conn_manager_impl_fuzz_test-5714279517126656 │ │ │ │ ├── codec_exception │ │ │ │ ├── empty │ │ │ │ ├── example │ │ │ │ ├── invalid_host │ │ │ │ ├── missing_host │ │ │ │ ├── regression_test_reuse_codec │ │ │ │ ├── state_local_complete │ │ │ │ ├── status_163 │ │ │ │ └── upgrade_test_case │ │ │ ├── conn_manager_impl_fuzz.proto │ │ │ ├── conn_manager_impl_fuzz_test.cc │ │ │ ├── conn_manager_impl_test.cc │ │ │ ├── conn_manager_impl_test_2.cc │ │ │ ├── conn_manager_impl_test_base.cc │ │ │ ├── conn_manager_impl_test_base.h │ │ │ ├── conn_manager_utility_test.cc │ │ │ ├── date_provider_impl_test.cc │ │ │ ├── filter_manager_test.cc │ │ │ ├── header_map_impl_corpus/ │ │ │ │ ├── appendheader │ │ │ │ ├── clusterfuzz-testcase-header_map_impl_fuzz_test-5633882138869760-prefix │ │ │ │ ├── clusterfuzz-testcase-minimized-header_map_impl_fuzz_test-5182326490791936 │ │ │ │ ├── clusterfuzz-testcase-minimized-header_map_impl_fuzz_test-5689833624698880 │ │ │ │ ├── clusterfuzz-testcase-minimized-header_map_impl_fuzz_test-6363647045533696 │ │ │ │ ├── crash-5fb09ca426eb21db14151b94fd74d418b49042e4 │ │ │ │ ├── empty │ │ │ │ ├── example │ │ │ │ └── example_lazymap │ │ │ ├── header_map_impl_fuzz.proto │ │ │ ├── header_map_impl_fuzz_test.cc │ │ │ ├── header_map_impl_speed_test.cc │ │ │ ├── 
header_map_impl_test.cc │ │ │ ├── header_utility_test.cc │ │ │ ├── http1/ │ │ │ │ ├── BUILD │ │ │ │ ├── codec_impl_test.cc │ │ │ │ ├── conn_pool_test.cc │ │ │ │ └── header_formatter_test.cc │ │ │ ├── http2/ │ │ │ │ ├── BUILD │ │ │ │ ├── codec_impl_test.cc │ │ │ │ ├── codec_impl_test_util.h │ │ │ │ ├── conn_pool_test.cc │ │ │ │ ├── frame_replay.cc │ │ │ │ ├── frame_replay.h │ │ │ │ ├── frame_replay_test.cc │ │ │ │ ├── http2_frame.cc │ │ │ │ ├── http2_frame.h │ │ │ │ ├── http2_frame_test.cc │ │ │ │ ├── metadata_encoder_decoder_test.cc │ │ │ │ ├── protocol_constraints_test.cc │ │ │ │ ├── request_header_corpus/ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-request_header_fuzz_test-4795710559223808.fuzz │ │ │ │ │ ├── crash-da39a3ee5e6b4b0d3255bfef95601890afd80709 │ │ │ │ │ ├── simple_example_huffman │ │ │ │ │ └── simple_example_plain │ │ │ │ ├── request_header_fuzz_test.cc │ │ │ │ ├── response_header_corpus/ │ │ │ │ │ ├── set_details_twice │ │ │ │ │ ├── simple_example_huffman │ │ │ │ │ └── simple_example_plain │ │ │ │ └── response_header_fuzz_test.cc │ │ │ ├── path_utility_corpus/ │ │ │ │ ├── Testcase_1 │ │ │ │ ├── Testcase_2 │ │ │ │ ├── Testcase_3 │ │ │ │ ├── Testcase_4 │ │ │ │ └── clusterfuzz-testcase-minimized-path_utility_fuzz_test-5770162224234496 │ │ │ ├── path_utility_fuzz.proto │ │ │ ├── path_utility_fuzz_test.cc │ │ │ ├── path_utility_test.cc │ │ │ ├── request_id_extension_uuid_impl_test.cc │ │ │ ├── status_test.cc │ │ │ ├── user_agent_test.cc │ │ │ ├── utility_corpus/ │ │ │ │ ├── clusterfuzz-testcase-minimized-utility_fuzz_test-5091558495092736 │ │ │ │ ├── clusterfuzz-testcase-minimized-utility_fuzz_test-5653272551751680 │ │ │ │ ├── clusterfuzz-testcase-utility_fuzz_test-5206456636276736 │ │ │ │ ├── clusterfuzz-testcase-utility_fuzz_test-5735325211557888 │ │ │ │ ├── extract_host_path_from_uri_0 │ │ │ │ ├── extract_host_path_from_uri_1 │ │ │ │ ├── extract_host_path_from_uri_2 │ │ │ │ ├── extract_host_path_from_uri_3 │ │ │ │ ├── extract_host_path_from_uri_4 │ │ │ 
│ ├── extract_host_path_from_uri_5 │ │ │ │ ├── extract_host_path_from_uri_6 │ │ │ │ ├── find_query_string_0 │ │ │ │ ├── find_query_string_1 │ │ │ │ ├── find_query_string_2 │ │ │ │ ├── find_query_string_3 │ │ │ │ ├── get_last_address_from_xff_0 │ │ │ │ ├── get_last_address_from_xff_1 │ │ │ │ ├── get_last_address_from_xff_2 │ │ │ │ ├── get_last_address_from_xff_3 │ │ │ │ ├── get_last_address_from_xff_4 │ │ │ │ ├── get_last_address_from_xff_5 │ │ │ │ ├── has_set_cookie_0 │ │ │ │ ├── has_set_cookie_1 │ │ │ │ ├── has_set_cookie_2 │ │ │ │ ├── has_set_cookie_3 │ │ │ │ ├── has_set_cookie_4 │ │ │ │ ├── make_set_cookie_value_0 │ │ │ │ ├── make_set_cookie_value_1 │ │ │ │ ├── make_set_cookie_value_2 │ │ │ │ ├── make_set_cookie_value_3 │ │ │ │ ├── parse_authority_string_0 │ │ │ │ ├── parse_authority_string_1 │ │ │ │ ├── parse_authority_string_2 │ │ │ │ ├── parse_authority_string_3 │ │ │ │ ├── parse_authority_string_4 │ │ │ │ ├── parse_cookie_value_0 │ │ │ │ ├── parse_cookie_value_1 │ │ │ │ ├── parse_cookie_value_2 │ │ │ │ ├── parse_cookie_value_3 │ │ │ │ ├── parse_cookie_value_4 │ │ │ │ ├── parse_cookie_value_5 │ │ │ │ ├── parse_cookie_value_6 │ │ │ │ ├── parse_cookie_value_7 │ │ │ │ ├── parse_cookie_value_8 │ │ │ │ ├── parse_parameters_0 │ │ │ │ ├── parse_parameters_1 │ │ │ │ ├── parse_parameters_2 │ │ │ │ ├── parse_parameters_3 │ │ │ │ ├── parse_query_string_0 │ │ │ │ ├── parse_query_string_1 │ │ │ │ ├── parse_query_string_2 │ │ │ │ ├── parse_query_string_3 │ │ │ │ ├── parse_query_string_4 │ │ │ │ ├── parse_query_string_5 │ │ │ │ ├── parse_query_string_6 │ │ │ │ ├── parse_query_string_7 │ │ │ │ ├── percent_decoding_string_0 │ │ │ │ ├── percent_decoding_string_1 │ │ │ │ ├── percent_decoding_string_2 │ │ │ │ ├── percent_encoding_string_0 │ │ │ │ ├── percent_encoding_string_1 │ │ │ │ ├── percent_encoding_string_2 │ │ │ │ └── valid │ │ │ ├── utility_fuzz.proto │ │ │ ├── utility_fuzz_test.cc │ │ │ └── utility_test.cc │ │ ├── init/ │ │ │ ├── BUILD │ │ │ ├── manager_impl_test.cc │ │ 
│ ├── target_impl_test.cc │ │ │ └── watcher_impl_test.cc │ │ ├── json/ │ │ │ ├── BUILD │ │ │ ├── config_schemas_test_data/ │ │ │ │ ├── BUILD │ │ │ │ ├── README.md │ │ │ │ ├── generate_test_data.py │ │ │ │ ├── test_access_log_schema.py │ │ │ │ ├── test_cluster_schema.py │ │ │ │ ├── test_http_conn_network_filter_schema.py │ │ │ │ ├── test_http_router_schema.py │ │ │ │ ├── test_listener_schema.py │ │ │ │ ├── test_route_configuration_schema.py │ │ │ │ ├── test_route_entry_schema.py │ │ │ │ ├── test_top_level_config_schema.py │ │ │ │ └── util.py │ │ │ ├── json_corpus/ │ │ │ │ ├── basic_descriptors.json │ │ │ │ ├── basic_double.json │ │ │ │ ├── basic_double_null.json │ │ │ │ ├── basic_empty.json │ │ │ │ ├── basic_empty_braces.json │ │ │ │ ├── basic_empty_inner.json │ │ │ │ ├── basic_failure.json │ │ │ │ ├── basic_hello_bool.json │ │ │ │ ├── basic_hello_int.json │ │ │ │ ├── basic_nested_int_list.json │ │ │ │ ├── basic_unterminated.json │ │ │ │ ├── clusterfuzz-testcase-minimized-json_fuzz_test-5724109283786752 │ │ │ │ ├── deep_recursion.json │ │ │ │ ├── double_some_values.json │ │ │ │ ├── int_max_min.json │ │ │ │ ├── int_too_high.json │ │ │ │ ├── int_too_low.json │ │ │ │ ├── missing_enclosing_document.json │ │ │ │ └── some_complex_example.json │ │ │ ├── json_fuzz_test.cc │ │ │ └── json_loader_test.cc │ │ ├── local_reply/ │ │ │ ├── BUILD │ │ │ └── local_reply_test.cc │ │ ├── memory/ │ │ │ ├── BUILD │ │ │ ├── debug_test.cc │ │ │ └── heap_shrinker_test.cc │ │ ├── network/ │ │ │ ├── BUILD │ │ │ ├── addr_family_aware_socket_option_impl_test.cc │ │ │ ├── address_impl_speed_test.cc │ │ │ ├── address_impl_test.cc │ │ │ ├── apple_dns_impl_test.cc │ │ │ ├── cidr_range_test.cc │ │ │ ├── connection_impl_test.cc │ │ │ ├── dns_impl_test.cc │ │ │ ├── filter_manager_impl_test.cc │ │ │ ├── filter_matcher_test.cc │ │ │ ├── io_socket_handle_impl_test.cc │ │ │ ├── lc_trie_speed_test.cc │ │ │ ├── lc_trie_test.cc │ │ │ ├── listen_socket_impl_test.cc │ │ │ ├── listener_impl_test.cc │ │ │ ├── 
listener_impl_test_base.h │ │ │ ├── resolver_impl_test.cc │ │ │ ├── socket_option_factory_test.cc │ │ │ ├── socket_option_impl_test.cc │ │ │ ├── socket_option_test.h │ │ │ ├── transport_socket_options_impl_test.cc │ │ │ ├── udp_listener_impl_batch_writer_test.cc │ │ │ ├── udp_listener_impl_test.cc │ │ │ ├── udp_listener_impl_test_base.h │ │ │ ├── utility_corpus/ │ │ │ │ └── test │ │ │ ├── utility_fuzz_test.cc │ │ │ └── utility_test.cc │ │ ├── protobuf/ │ │ │ ├── BUILD │ │ │ ├── message_validator_impl_test.cc │ │ │ ├── type_util_test.cc │ │ │ ├── utility_test.cc │ │ │ ├── value_util_corpus/ │ │ │ │ ├── empty │ │ │ │ ├── string_value │ │ │ │ └── struct_value │ │ │ └── value_util_fuzz_test.cc │ │ ├── router/ │ │ │ ├── BUILD │ │ │ ├── config_impl_headermap_benchmark_test.cc │ │ │ ├── config_impl_speed_test.cc │ │ │ ├── config_impl_test.cc │ │ │ ├── corpus_from_config_impl.sh │ │ │ ├── header_formatter_test.cc │ │ │ ├── header_parser_corpus/ │ │ │ │ ├── address_0 │ │ │ │ ├── address_1 │ │ │ │ ├── address_2 │ │ │ │ ├── address_3 │ │ │ │ ├── clusterfuzz-testcase-header_parser_fuzz_test-5107723602493440 │ │ │ │ ├── clusterfuzz-testcase-header_parser_fuzz_test-5163306626580480 │ │ │ │ ├── clusterfuzz-testcase-header_parser_fuzz_test-5648325682921472 │ │ │ │ ├── clusterfuzz-testcase-header_parser_fuzz_test-5702537941876736 │ │ │ │ ├── clusterfuzz-testcase-header_parser_fuzz_test-5710655463620608 │ │ │ │ ├── clusterfuzz-testcase-header_parser_fuzz_test-6195059702628352 │ │ │ │ ├── clusterfuzz-testcase-minimized-header_parser_fuzz_test-4709439954485248 │ │ │ │ ├── clusterfuzz-testcase-minimized-header_parser_fuzz_test-5191408676241408 │ │ │ │ ├── clusterfuzz-testcase-minimized-header_parser_fuzz_test-5201773654704128 │ │ │ │ ├── clusterfuzz-testcase-minimized-header_parser_fuzz_test-5630125928873984 │ │ │ │ ├── clusterfuzz-testcase-minimized-header_parser_fuzz_test-5647641023610880 │ │ │ │ ├── compound_headers │ │ │ │ ├── crash-af52fc744a3a7d7c9fe632ca457830ca323023bd │ │ │ │ 
├── foo │ │ │ │ ├── invalid_0 │ │ │ │ ├── invalid_1 │ │ │ │ ├── invalid_10 │ │ │ │ ├── invalid_11 │ │ │ │ ├── invalid_2 │ │ │ │ ├── invalid_3 │ │ │ │ ├── invalid_4 │ │ │ │ ├── invalid_5 │ │ │ │ ├── invalid_6 │ │ │ │ ├── invalid_7 │ │ │ │ ├── invalid_8 │ │ │ │ ├── invalid_9 │ │ │ │ ├── protocol │ │ │ │ ├── start_time │ │ │ │ ├── timeout_test_case │ │ │ │ ├── upstream_metadata_0 │ │ │ │ ├── upstream_metadata_1 │ │ │ │ └── valid │ │ │ ├── header_parser_fuzz.proto │ │ │ ├── header_parser_fuzz_test.cc │ │ │ ├── rds_impl_test.cc │ │ │ ├── reset_header_parser_test.cc │ │ │ ├── retry_state_impl_test.cc │ │ │ ├── route_corpus/ │ │ │ │ ├── Response_headers_to_remove │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-4592245302362112 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-4701452596674560.fuzz │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-4803620674732032.fuzz │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5074413991231488.fuzz │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5077190058704896 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5118898564497408.fuzz │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5142800207708160 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5198208916520960 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5206842068697088 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5634743613259776 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5635252339343360 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5650952886943744 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5654717359718400 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5661762636742656 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5699465522970624 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5731276071370752 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-5748492233605120 │ │ │ │ ├── 
clusterfuzz-testcase-minimized-route_fuzz_test-5750746072481792 │ │ │ │ ├── clusterfuzz-testcase-minimized-route_fuzz_test-6249350586171392 │ │ │ │ ├── clusterfuzz-testcase-route_fuzz_test-5084150522707968.fuzz │ │ │ │ ├── clusterfuzz-testcase-route_fuzz_test-5088096376324096 │ │ │ │ ├── clusterfuzz-testcase-route_fuzz_test-5137346677178368 │ │ │ │ ├── clusterfuzz-testcase-route_fuzz_test-5647162250625024 │ │ │ │ ├── clusterfuzz-testcase-route_fuzz_test-5671270751141888 │ │ │ │ ├── config_impl_test_0 │ │ │ │ ├── empty │ │ │ │ ├── internal_redirect_nullderef │ │ │ │ ├── regex │ │ │ │ └── valid_headers_to_remove │ │ │ ├── route_fuzz.proto │ │ │ ├── route_fuzz_test.cc │ │ │ ├── router_fuzz.proto │ │ │ ├── router_ratelimit_test.cc │ │ │ ├── router_test.cc │ │ │ ├── router_upstream_log_test.cc │ │ │ ├── scoped_config_impl_test.cc │ │ │ ├── scoped_rds_test.cc │ │ │ ├── shadow_writer_impl_test.cc │ │ │ ├── string_accessor_impl_test.cc │ │ │ ├── upstream_request_test.cc │ │ │ └── vhds_test.cc │ │ ├── runtime/ │ │ │ ├── BUILD │ │ │ ├── filesystem_setup.sh │ │ │ ├── runtime_flag_override_noop_test.cc │ │ │ ├── runtime_flag_override_test.cc │ │ │ ├── runtime_impl_test.cc │ │ │ ├── runtime_protos_test.cc │ │ │ ├── test_data/ │ │ │ │ └── root/ │ │ │ │ ├── envoy/ │ │ │ │ │ ├── file1 │ │ │ │ │ ├── file10 │ │ │ │ │ ├── file11 │ │ │ │ │ ├── file12 │ │ │ │ │ ├── file13 │ │ │ │ │ ├── file14 │ │ │ │ │ ├── file15 │ │ │ │ │ ├── file2 │ │ │ │ │ ├── file3 │ │ │ │ │ ├── file4 │ │ │ │ │ ├── file5 │ │ │ │ │ ├── file6 │ │ │ │ │ ├── file7 │ │ │ │ │ ├── file8 │ │ │ │ │ ├── file9 │ │ │ │ │ ├── file_with_double │ │ │ │ │ ├── file_with_double_comment │ │ │ │ │ ├── file_with_double_newlines │ │ │ │ │ ├── file_with_large_integer │ │ │ │ │ ├── file_with_negative_double │ │ │ │ │ ├── file_with_words │ │ │ │ │ └── subdir/ │ │ │ │ │ └── file │ │ │ │ └── envoy_override/ │ │ │ │ └── file1 │ │ │ └── utility.h │ │ ├── secret/ │ │ │ ├── BUILD │ │ │ ├── sds_api_test.cc │ │ │ └── secret_manager_impl_test.cc │ 
│ ├── shared_pool/ │ │ │ ├── BUILD │ │ │ └── shared_pool_test.cc │ │ ├── signal/ │ │ │ ├── BUILD │ │ │ └── signals_test.cc │ │ ├── singleton/ │ │ │ ├── BUILD │ │ │ ├── manager_impl_test.cc │ │ │ └── threadsafe_singleton_test.cc │ │ ├── stats/ │ │ │ ├── BUILD │ │ │ ├── allocator_impl_test.cc │ │ │ ├── histogram_impl_test.cc │ │ │ ├── isolated_store_impl_test.cc │ │ │ ├── make_elements_helper.cc │ │ │ ├── make_elements_helper.h │ │ │ ├── metric_impl_test.cc │ │ │ ├── recent_lookups_speed_test.cc │ │ │ ├── recent_lookups_test.cc │ │ │ ├── refcount_ptr_test.cc │ │ │ ├── stat_merger_corpus/ │ │ │ │ ├── clusterfuzz-testcase-minimized-stat_merger_fuzz_test-4800677542100992.fuzz │ │ │ │ ├── example1 │ │ │ │ ├── example2 │ │ │ │ ├── example3 │ │ │ │ ├── example4 │ │ │ │ └── example5 │ │ │ ├── stat_merger_fuzz_test.cc │ │ │ ├── stat_merger_test.cc │ │ │ ├── stat_test_utility.cc │ │ │ ├── stat_test_utility.h │ │ │ ├── stat_test_utility_test.cc │ │ │ ├── stats_matcher_impl_test.cc │ │ │ ├── symbol_table_corpus/ │ │ │ │ ├── clusterfuzz-testcase-minimized-symbol_table_fuzz_test-5645970620809216 │ │ │ │ ├── example1 │ │ │ │ ├── example2 │ │ │ │ ├── example3 │ │ │ │ └── jan_22.fuzz │ │ │ ├── symbol_table_fuzz_test.cc │ │ │ ├── symbol_table_impl_test.cc │ │ │ ├── symbol_table_speed_test.cc │ │ │ ├── tag_extractor_impl_test.cc │ │ │ ├── tag_producer_impl_test.cc │ │ │ ├── thread_local_store_speed_test.cc │ │ │ ├── thread_local_store_test.cc │ │ │ ├── utility_corpus/ │ │ │ │ └── test │ │ │ ├── utility_fuzz_test.cc │ │ │ └── utility_test.cc │ │ ├── stream_info/ │ │ │ ├── BUILD │ │ │ ├── filter_state_impl_test.cc │ │ │ ├── stream_info_impl_test.cc │ │ │ ├── test_int_accessor.h │ │ │ ├── test_util.h │ │ │ ├── uint32_accessor_impl_test.cc │ │ │ └── utility_test.cc │ │ ├── tcp/ │ │ │ ├── BUILD │ │ │ └── conn_pool_test.cc │ │ ├── tcp_proxy/ │ │ │ ├── BUILD │ │ │ ├── tcp_proxy_test.cc │ │ │ └── upstream_test.cc │ │ ├── thread_local/ │ │ │ ├── BUILD │ │ │ └── thread_local_impl_test.cc │ │ 
├── tracing/ │ │ │ ├── BUILD │ │ │ ├── http_tracer_impl_test.cc │ │ │ └── http_tracer_manager_impl_test.cc │ │ └── upstream/ │ │ ├── BUILD │ │ ├── bounded_load_hlb_test.cc │ │ ├── cds_api_impl_test.cc │ │ ├── cluster_factory_impl_test.cc │ │ ├── cluster_manager_impl_test.cc │ │ ├── cluster_update_tracker_test.cc │ │ ├── conn_pool_map_impl_test.cc │ │ ├── edf_scheduler_test.cc │ │ ├── eds_speed_test.cc │ │ ├── eds_test.cc │ │ ├── hds_test.cc │ │ ├── health_check_corpus/ │ │ │ ├── clusterfuzz-testcase-minimized-health_check_fuzz_test-5678121129607168 │ │ │ ├── clusterfuzz-testcase-minimized-health_check_fuzz_test-5748071634567168 │ │ │ ├── custom_health_check │ │ │ ├── grpc_Success │ │ │ ├── grpc_SuccessWithAuthority │ │ │ ├── grpc_crash-33da964bf71e02e3324ceee47fbb204532817e61 │ │ │ ├── grpc_crash-50b2ffbcf518e8f078ad8ed1f9801feb89a4d158 │ │ │ ├── grpc_crash-5747b3523c44ce0a228a8d8884ed7aeea2608341 │ │ │ ├── grpc_crash-5d27a3a5fc4fa384c9cbd76f0e7a3d841083396a │ │ │ ├── grpc_crash-d9287189542575619bdf21886dd396334fded9c6 │ │ │ ├── grpc_no-trailers │ │ │ ├── http_ConnectionClose │ │ │ ├── http_Degraded │ │ │ ├── http_Disconnect │ │ │ ├── http_LargeNanos │ │ │ ├── http_RemoteCloseBetweenChecks │ │ │ ├── http_Success │ │ │ ├── http_SuccessStartFailedSuccessFirst │ │ │ ├── http_Timeout │ │ │ ├── http_TimeoutThenRemoteClose │ │ │ ├── http_TimeoutThenSuccess │ │ │ ├── http_ZeroRetryInterval │ │ │ ├── http_crash-daebc8c8bcb985b777d6fa462a265ba5cdd8b06e │ │ │ ├── http_crash-test │ │ │ ├── http_crash_1 │ │ │ ├── http_crash_2 │ │ │ ├── http_crash_3 │ │ │ ├── http_crash_4 │ │ │ ├── http_crash_5 │ │ │ ├── http_out_of_range_status │ │ │ ├── http_test-something │ │ │ ├── tcp-expect_close_test │ │ │ ├── tcp_DataWithoutReusingConnection │ │ │ ├── tcp_Success │ │ │ ├── tcp_Timeout │ │ │ ├── tcp_TimeoutThenRemoteClose │ │ │ ├── tcp_WrongData │ │ │ ├── tcp_crash-3596e4a310a1c131312ba869578be28a86a0439b │ │ │ ├── tcp_crash-449c4bf2d000d6e56b782fdd26a86e20a7f87b4f │ │ │ ├── 
tcp_crash-e899b54d3e39838939bdde4000acbe8bcc8c37b9 │ │ │ ├── tcp_crash-test │ │ │ ├── tcp_crash-test-1 │ │ │ ├── tcp_crash_test │ │ │ ├── tcp_expect_close_test │ │ │ └── tcp_expect_close_test_2 │ │ ├── health_check_fuzz.cc │ │ ├── health_check_fuzz.h │ │ ├── health_check_fuzz.proto │ │ ├── health_check_fuzz_test.cc │ │ ├── health_checker_impl_test.cc │ │ ├── health_checker_impl_test_utils.cc │ │ ├── health_checker_impl_test_utils.h │ │ ├── host_stats_test.cc │ │ ├── host_utility_test.cc │ │ ├── load_balancer_benchmark.cc │ │ ├── load_balancer_impl_test.cc │ │ ├── load_balancer_simulation_test.cc │ │ ├── load_stats_reporter_test.cc │ │ ├── logical_dns_cluster_test.cc │ │ ├── maglev_lb_test.cc │ │ ├── original_dst_cluster_test.cc │ │ ├── outlier_detection_impl_test.cc │ │ ├── priority_conn_pool_map_impl_test.cc │ │ ├── resource_manager_impl_test.cc │ │ ├── ring_hash_lb_test.cc │ │ ├── subset_lb_test.cc │ │ ├── test_cluster_manager.h │ │ ├── test_data/ │ │ │ ├── sds_response.json │ │ │ ├── sds_response_2.json │ │ │ ├── sds_response_3.json │ │ │ └── sds_response_weight_change.json │ │ ├── transport_socket_matcher_test.cc │ │ ├── upstream_impl_test.cc │ │ └── utility.h │ ├── config/ │ │ ├── BUILD │ │ ├── integration/ │ │ │ ├── BUILD │ │ │ ├── certs/ │ │ │ │ ├── BUILD │ │ │ │ ├── README.md │ │ │ │ ├── cacert.cfg │ │ │ │ ├── cacert.pem │ │ │ │ ├── cakey.pem │ │ │ │ ├── certs.sh │ │ │ │ ├── client_ecdsacert.pem │ │ │ │ ├── client_ecdsacert_hash.h │ │ │ │ ├── client_ecdsakey.pem │ │ │ │ ├── clientcert.cfg │ │ │ │ ├── clientcert.pem │ │ │ │ ├── clientcert_hash.h │ │ │ │ ├── clientkey.pem │ │ │ │ ├── server_ecdsa_ocsp_resp.der │ │ │ │ ├── server_ecdsacert.pem │ │ │ │ ├── server_ecdsacert_hash.h │ │ │ │ ├── server_ecdsakey.pem │ │ │ │ ├── server_ocsp_resp.der │ │ │ │ ├── servercert.cfg │ │ │ │ ├── servercert.pem │ │ │ │ ├── servercert_hash.h │ │ │ │ ├── serverkey.pem │ │ │ │ ├── upstreamcacert.cfg │ │ │ │ ├── upstreamcacert.pem │ │ │ │ ├── upstreamcakey.pem │ │ │ │ ├── 
upstreamcert.cfg │ │ │ │ ├── upstreamcert.pem │ │ │ │ ├── upstreamcert_hash.h │ │ │ │ ├── upstreamkey.pem │ │ │ │ ├── upstreamlocalhostcert.cfg │ │ │ │ ├── upstreamlocalhostcert.pem │ │ │ │ ├── upstreamlocalhostcert_hash.h │ │ │ │ └── upstreamlocalhostkey.pem │ │ │ ├── google_com_proxy_port_0.v2.yaml │ │ │ ├── server.yaml │ │ │ ├── server_unix_listener.yaml │ │ │ ├── server_xds.bootstrap.udpa.yaml │ │ │ ├── server_xds.bootstrap.yaml │ │ │ ├── server_xds.cds.with_unknown_field.yaml │ │ │ ├── server_xds.cds.yaml │ │ │ ├── server_xds.eds.ads_cluster.yaml │ │ │ ├── server_xds.eds.with_unknown_field.yaml │ │ │ ├── server_xds.eds.yaml │ │ │ ├── server_xds.lds.typed_struct.yaml │ │ │ ├── server_xds.lds.udpa.list_collection.yaml │ │ │ ├── server_xds.lds.with_unknown_field.typed_struct.yaml │ │ │ ├── server_xds.lds.with_unknown_field.yaml │ │ │ ├── server_xds.lds.yaml │ │ │ ├── server_xds.rds.with_unknown_field.yaml │ │ │ └── server_xds.rds.yaml │ │ ├── utility.cc │ │ └── utility.h │ ├── config_test/ │ │ ├── BUILD │ │ ├── config_test.cc │ │ ├── config_test.h │ │ ├── deprecated_configs_test.cc │ │ ├── example_configs_test.cc │ │ └── example_configs_test_setup.sh │ ├── dependencies/ │ │ ├── BUILD │ │ └── curl_test.cc │ ├── dummy_main.cc │ ├── exe/ │ │ ├── BUILD │ │ ├── build_id_test.sh │ │ ├── envoy_static_test.sh │ │ ├── main_common_test.cc │ │ ├── pie_test.sh │ │ ├── terminate_handler_test.cc │ │ └── version_out_test.sh │ ├── extensions/ │ │ ├── BUILD │ │ ├── access_loggers/ │ │ │ ├── common/ │ │ │ │ ├── BUILD │ │ │ │ └── access_log_base_test.cc │ │ │ ├── file/ │ │ │ │ ├── BUILD │ │ │ │ └── config_test.cc │ │ │ ├── grpc/ │ │ │ │ ├── BUILD │ │ │ │ ├── grpc_access_log_impl_test.cc │ │ │ │ ├── grpc_access_log_utils_test.cc │ │ │ │ ├── http_config_test.cc │ │ │ │ ├── http_grpc_access_log_impl_test.cc │ │ │ │ ├── http_grpc_access_log_integration_test.cc │ │ │ │ └── tcp_grpc_access_log_integration_test.cc │ │ │ └── wasm/ │ │ │ ├── BUILD │ │ │ ├── config_test.cc │ │ │ └── 
test_data/ │ │ │ ├── BUILD │ │ │ ├── test_cpp.cc │ │ │ └── test_cpp_null_plugin.cc │ │ ├── bootstrap/ │ │ │ └── wasm/ │ │ │ ├── BUILD │ │ │ ├── config_test.cc │ │ │ ├── test_data/ │ │ │ │ ├── BUILD │ │ │ │ ├── asm2wasm_cpp.cc │ │ │ │ ├── bad_signature_cpp.cc │ │ │ │ ├── emscripten_cpp.cc │ │ │ │ ├── logging_cpp.cc │ │ │ │ ├── logging_rust/ │ │ │ │ │ ├── Cargo.toml │ │ │ │ │ └── src/ │ │ │ │ │ └── lib.rs │ │ │ │ ├── missing_cpp.cc │ │ │ │ ├── segv_cpp.cc │ │ │ │ ├── speed_cpp.cc │ │ │ │ ├── speed_cpp_null_plugin.cc │ │ │ │ ├── start_cpp.cc │ │ │ │ ├── start_cpp_null_plugin.cc │ │ │ │ ├── stats_cpp.cc │ │ │ │ └── stats_cpp_null_plugin.cc │ │ │ ├── wasm_speed_test.cc │ │ │ └── wasm_test.cc │ │ ├── clusters/ │ │ │ ├── aggregate/ │ │ │ │ ├── BUILD │ │ │ │ ├── cluster_integration_test.cc │ │ │ │ ├── cluster_test.cc │ │ │ │ └── cluster_update_test.cc │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ ├── BUILD │ │ │ │ └── cluster_test.cc │ │ │ └── redis/ │ │ │ ├── BUILD │ │ │ ├── crc16_test.cc │ │ │ ├── mocks.cc │ │ │ ├── mocks.h │ │ │ ├── redis_cluster_integration_test.cc │ │ │ ├── redis_cluster_lb_test.cc │ │ │ └── redis_cluster_test.cc │ │ ├── common/ │ │ │ ├── BUILD │ │ │ ├── aws/ │ │ │ │ ├── BUILD │ │ │ │ ├── aws_metadata_fetcher_integration_test.cc │ │ │ │ ├── credentials_provider_impl_test.cc │ │ │ │ ├── credentials_provider_test.cc │ │ │ │ ├── mocks.cc │ │ │ │ ├── mocks.h │ │ │ │ ├── region_provider_impl_test.cc │ │ │ │ ├── signer_impl_test.cc │ │ │ │ └── utility_test.cc │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ ├── BUILD │ │ │ │ ├── dns_cache_impl_test.cc │ │ │ │ ├── dns_cache_resource_manager_test.cc │ │ │ │ ├── mocks.cc │ │ │ │ └── mocks.h │ │ │ ├── matcher/ │ │ │ │ ├── BUILD │ │ │ │ └── matcher_test.cc │ │ │ ├── proxy_protocol/ │ │ │ │ ├── BUILD │ │ │ │ ├── proxy_protocol_header_test.cc │ │ │ │ └── proxy_protocol_regression_test.cc │ │ │ ├── redis/ │ │ │ │ ├── BUILD │ │ │ │ ├── cluster_refresh_manager_test.cc │ │ │ │ ├── mocks.cc │ │ │ │ └── mocks.h │ │ │ ├── sqlutils/ │ 
│ │ │ ├── BUILD │ │ │ │ └── sqlutils_test.cc │ │ │ ├── tap/ │ │ │ │ ├── BUILD │ │ │ │ ├── admin_test.cc │ │ │ │ ├── common.cc │ │ │ │ ├── common.h │ │ │ │ └── tap_config_base_test.cc │ │ │ ├── utility_test.cc │ │ │ └── wasm/ │ │ │ ├── BUILD │ │ │ ├── test_data/ │ │ │ │ ├── BUILD │ │ │ │ ├── Makefile │ │ │ │ ├── bad_signature_cpp.cc │ │ │ │ ├── test_context_cpp.cc │ │ │ │ ├── test_context_cpp_null_plugin.cc │ │ │ │ ├── test_cpp.cc │ │ │ │ ├── test_cpp_null_plugin.cc │ │ │ │ └── test_rust.rs │ │ │ ├── wasm_speed_test.cc │ │ │ ├── wasm_test.cc │ │ │ └── wasm_vm_test.cc │ │ ├── compression/ │ │ │ └── gzip/ │ │ │ ├── BUILD │ │ │ ├── compressor/ │ │ │ │ ├── BUILD │ │ │ │ └── zlib_compressor_impl_test.cc │ │ │ ├── compressor_corpus/ │ │ │ │ ├── clusterfuzz-testcase-minimized-compressor_fuzz_test-5149986500640768 │ │ │ │ ├── clusterfuzz-testcase-minimized-compressor_fuzz_test-5407695477932032 │ │ │ │ ├── clusterfuzz-testcase-minimized-compressor_fuzz_test-5644831560302592 │ │ │ │ ├── clusterfuzz-testcase-minimized-compressor_fuzz_test-6005942746873856 │ │ │ │ ├── empty │ │ │ │ ├── noise │ │ │ │ └── simple │ │ │ ├── compressor_fuzz_test.cc │ │ │ └── decompressor/ │ │ │ ├── BUILD │ │ │ └── zlib_decompressor_impl_test.cc │ │ ├── extensions_build_system.bzl │ │ ├── filters/ │ │ │ ├── common/ │ │ │ │ ├── expr/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── context_test.cc │ │ │ │ │ ├── evaluator_corpus/ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-evaluator_fuzz_test-4803938816884736.fuzz │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-evaluator_fuzz_test-5723735986536448 │ │ │ │ │ │ ├── crash-67e48e44650e25b93159729a7a4dd386625bb5c2 │ │ │ │ │ │ ├── crash-87e3c780acf4403ddd8b182496e6cad5ac5efd66 │ │ │ │ │ │ ├── crash-d6a9858c9b8e8b60845af9f5adc9eaead58147bd │ │ │ │ │ │ ├── emptystruct │ │ │ │ │ │ ├── errorcondition │ │ │ │ │ │ ├── example │ │ │ │ │ │ ├── example1 │ │ │ │ │ │ ├── headercondition │ │ │ │ │ │ ├── metadatacondition │ │ │ │ │ │ └── mistypedcondition │ │ │ │ │ ├── 
evaluator_fuzz.proto │ │ │ │ │ └── evaluator_fuzz_test.cc │ │ │ │ ├── ext_authz/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── check_request_utils_test.cc │ │ │ │ │ ├── ext_authz_grpc_impl_test.cc │ │ │ │ │ ├── ext_authz_http_impl_test.cc │ │ │ │ │ ├── mocks.cc │ │ │ │ │ ├── mocks.h │ │ │ │ │ ├── test_common.cc │ │ │ │ │ └── test_common.h │ │ │ │ ├── fault/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── fault_config_test.cc │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── local_ratelimit_test.cc │ │ │ │ ├── lua/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── lua_test.cc │ │ │ │ │ ├── lua_wrappers.h │ │ │ │ │ └── wrappers_test.cc │ │ │ │ ├── original_src/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── original_src_socket_option_test.cc │ │ │ │ ├── ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── mocks.cc │ │ │ │ │ ├── mocks.h │ │ │ │ │ ├── ratelimit_impl_test.cc │ │ │ │ │ └── utils.h │ │ │ │ └── rbac/ │ │ │ │ ├── BUILD │ │ │ │ ├── engine_impl_test.cc │ │ │ │ ├── matchers_test.cc │ │ │ │ ├── mocks.h │ │ │ │ └── utility_test.cc │ │ │ ├── http/ │ │ │ │ ├── adaptive_concurrency/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── adaptive_concurrency_filter_integration_test.cc │ │ │ │ │ ├── adaptive_concurrency_filter_integration_test.h │ │ │ │ │ ├── adaptive_concurrency_filter_test.cc │ │ │ │ │ └── controller/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── gradient_controller_test.cc │ │ │ │ ├── admission_control/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── admission_control_filter_test.cc │ │ │ │ │ ├── admission_control_integration_test.cc │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── controller_test.cc │ │ │ │ │ └── success_criteria_evaluator_test.cc │ │ │ │ ├── aws_lambda/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── arn_test.cc │ │ │ │ │ ├── aws_lambda_filter_integration_test.cc │ │ │ │ │ ├── aws_lambda_filter_test.cc │ │ │ │ │ └── config_test.cc │ │ │ │ ├── aws_request_signing/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── aws_request_signing_filter_test.cc │ │ │ │ │ └── config_test.cc │ │ │ │ ├── buffer/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── 
buffer_filter_integration_test.cc │ │ │ │ │ ├── buffer_filter_test.cc │ │ │ │ │ └── config_test.cc │ │ │ │ ├── cache/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── cache_filter_integration_test.cc │ │ │ │ │ ├── cache_filter_test.cc │ │ │ │ │ ├── cache_headers_utils_test.cc │ │ │ │ │ ├── cacheability_utils_test.cc │ │ │ │ │ ├── common.h │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── http_cache_test.cc │ │ │ │ │ └── simple_http_cache/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── simple_http_cache_test.cc │ │ │ │ ├── cdn_loop/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── filter_integration_test.cc │ │ │ │ │ ├── filter_test.cc │ │ │ │ │ ├── parser_corpus/ │ │ │ │ │ │ ├── ParseCdnInfo-InvalidParameter.txt │ │ │ │ │ │ ├── ParseCdnInfo-MissingParameter.txt │ │ │ │ │ │ ├── ParseCdnInfo-MultipleParametersWithWhitespace.txt │ │ │ │ │ │ ├── ParseCdnInfo-SingleParameter.txt │ │ │ │ │ │ ├── ParseCdnInfo-SingleParameterExtraWhitespace.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-ExtraWhiteSpace.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-InvalidCdnId.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-InvalidParseNoComma.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-Rfc7230Section7Tests-1.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-Rfc7230Section7Tests-2.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-Rfc7230Section7Tests-3.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-Rfc7230Section7Tests-4-empty.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-Rfc7230Section7Tests-5.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-Rfc7230Section7Tests-6.txt │ │ │ │ │ │ ├── ParseCdnInfoTest-Simple.txt │ │ │ │ │ │ └── rfc8586-example.txt │ │ │ │ │ ├── parser_fuzz_test.cc │ │ │ │ │ ├── parser_test.cc │ │ │ │ │ └── utils_test.cc │ │ │ │ ├── common/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── compressor/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── compressor_filter_speed_test.cc │ │ │ │ │ │ └── compressor_filter_test.cc │ │ │ │ │ ├── empty_http_filter_config.h │ │ │ │ │ ├── fuzz/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── filter_corpus/ │ │ │ │ │ │ │ ├── adminnullptr │ │ │ │ │ │ │ ├── buffer1 │ │ │ │ │ │ │ ├── 
clusterfuzz-testcase-filter_fuzz_test-5082368313655296 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-filter_fuzz_test-5728684315770880 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5107908850483200.fuzz │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5115447232692224 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5143098977157120.fuzz │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5144919410999296 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5167332043522048 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5635252589690880 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5661692476522496 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5710239968264192 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5713820013297664 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5714246842449920.fuzz │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5726031248621568 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5728217898680320 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5762605081952256 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-5969746626609152 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-6133921480966144 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-6246534715539456 │ │ │ │ │ │ │ ├── clusterfuzz-testcase-minimized-filter_fuzz_test-6506457133219840 │ │ │ │ │ │ │ ├── crash-3014465358f0947e73ac12ccb40b299d5b0646b3 │ │ │ │ │ │ │ ├── crash-7137be4f227ac0faa82d76aa9b4c32a68e4c15f9 │ │ │ │ │ │ │ ├── crash-803e5cd629426a361101632f37b4651ef595c92e │ │ │ │ │ │ │ ├── crash-a45927a3f6e2efcbdb8ba12a1816895b219a09d2 │ │ │ │ │ │ │ ├── crash-bb74d7280823776808e881b20c0a9c87f7a2163b │ │ │ │ │ │ │ ├── crash-da39a3ee5e6b4b0d3255bfef95601890afd80709 │ │ │ │ │ │ │ ├── 
crash-ee8851a25304e8515905d09019afe8798b2376ac │ │ │ │ │ │ │ ├── grpc_stats │ │ │ │ │ │ │ ├── grpc_transcoding_decode_encode │ │ │ │ │ │ │ ├── grpc_transcoding_http_data │ │ │ │ │ │ │ ├── grpc_transcoding_proto_data │ │ │ │ │ │ │ ├── jwt_connect │ │ │ │ │ │ │ ├── metadata_not_reached │ │ │ │ │ │ │ ├── not_implemented_tap │ │ │ │ │ │ │ ├── oom-da39a3ee5e6b4b0d3255bfef95601890afd80709 │ │ │ │ │ │ │ ├── router_buffering │ │ │ │ │ │ │ └── valid_jwt │ │ │ │ │ │ ├── filter_fuzz.proto │ │ │ │ │ │ ├── filter_fuzz_test.cc │ │ │ │ │ │ ├── http_filter_fuzzer.h │ │ │ │ │ │ ├── uber_filter.cc │ │ │ │ │ │ ├── uber_filter.h │ │ │ │ │ │ └── uber_per_filter.cc │ │ │ │ │ ├── jwks_fetcher_test.cc │ │ │ │ │ ├── mock.cc │ │ │ │ │ ├── mock.h │ │ │ │ │ └── utility_test.cc │ │ │ │ ├── compressor/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── compressor_filter_integration_test.cc │ │ │ │ │ ├── compressor_filter_test.cc │ │ │ │ │ ├── config_test.cc │ │ │ │ │ └── mock_compressor_library.proto │ │ │ │ ├── cors/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── cors_filter_integration_test.cc │ │ │ │ │ └── cors_filter_test.cc │ │ │ │ ├── csrf/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── csrf_filter_integration_test.cc │ │ │ │ │ └── csrf_filter_test.cc │ │ │ │ ├── decompressor/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── decompressor_filter_integration_test.cc │ │ │ │ │ └── decompressor_filter_test.cc │ │ │ │ ├── dynamic_forward_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── proxy_filter_integration_test.cc │ │ │ │ │ └── proxy_filter_test.cc │ │ │ │ ├── dynamo/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── dynamo_filter_test.cc │ │ │ │ │ ├── dynamo_request_parser_test.cc │ │ │ │ │ └── dynamo_stats_test.cc │ │ │ │ ├── ext_authz/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── ext_authz_integration_test.cc │ │ │ │ │ └── ext_authz_test.cc │ │ │ │ ├── fault/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── fault_filter_integration_test.cc │ │ │ │ │ ├── fault_filter_test.cc │ │ 
│ │ │ └── utility.h │ │ │ │ ├── grpc_http1_bridge/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ └── http1_bridge_filter_test.cc │ │ │ │ ├── grpc_http1_reverse_bridge/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── reverse_bridge_integration_test.cc │ │ │ │ │ └── reverse_bridge_test.cc │ │ │ │ ├── grpc_json_transcoder/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── grpc_json_transcoder_integration_test.cc │ │ │ │ │ ├── http_body_utils_test.cc │ │ │ │ │ ├── json_transcoder_filter_test.cc │ │ │ │ │ └── transcoder_input_stream_test.cc │ │ │ │ ├── grpc_stats/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config_test.cc │ │ │ │ ├── grpc_web/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── grpc_web_filter_integration_test.cc │ │ │ │ │ └── grpc_web_filter_test.cc │ │ │ │ ├── gzip/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── gzip_filter_integration_test.cc │ │ │ │ │ └── gzip_filter_test.cc │ │ │ │ ├── header_to_metadata/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ └── header_to_metadata_filter_test.cc │ │ │ │ ├── health_check/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ └── health_check_test.cc │ │ │ │ ├── ip_tagging/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── ip_tagging_filter_test.cc │ │ │ │ ├── jwt_authn/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── all_verifier_test.cc │ │ │ │ │ ├── authenticator_test.cc │ │ │ │ │ ├── extractor_test.cc │ │ │ │ │ ├── filter_config_test.cc │ │ │ │ │ ├── filter_factory_test.cc │ │ │ │ │ ├── filter_integration_test.cc │ │ │ │ │ ├── filter_test.cc │ │ │ │ │ ├── group_verifier_test.cc │ │ │ │ │ ├── jwks_cache_test.cc │ │ │ │ │ ├── matcher_test.cc │ │ │ │ │ ├── mock.h │ │ │ │ │ ├── provider_verifier_test.cc │ │ │ │ │ └── test_common.h │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ └── filter_test.cc │ │ │ │ ├── lua/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── lua_filter_test.cc │ │ │ │ │ ├── lua_integration_test.cc │ │ │ │ │ └── 
wrappers_test.cc │ │ │ │ ├── oauth2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── filter_test.cc │ │ │ │ │ ├── oauth_integration_test.cc │ │ │ │ │ └── oauth_test.cc │ │ │ │ ├── on_demand/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── on_demand_filter_test.cc │ │ │ │ ├── original_src/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── original_src_config_factory_test.cc │ │ │ │ │ └── original_src_test.cc │ │ │ │ ├── ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── ratelimit_headers_test.cc │ │ │ │ │ ├── ratelimit_integration_test.cc │ │ │ │ │ └── ratelimit_test.cc │ │ │ │ ├── rbac/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── mocks.h │ │ │ │ │ ├── rbac_filter_integration_test.cc │ │ │ │ │ └── rbac_filter_test.cc │ │ │ │ ├── router/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── auto_sni_integration_test.cc │ │ │ │ │ └── config_test.cc │ │ │ │ ├── squash/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── squash_filter_integration_test.cc │ │ │ │ │ └── squash_filter_test.cc │ │ │ │ ├── tap/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── common.h │ │ │ │ │ ├── tap_config_impl_test.cc │ │ │ │ │ ├── tap_filter_integration_test.cc │ │ │ │ │ └── tap_filter_test.cc │ │ │ │ └── wasm/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ ├── test_data/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── async_call_rust/ │ │ │ │ │ │ ├── Cargo.toml │ │ │ │ │ │ └── src/ │ │ │ │ │ │ └── lib.rs │ │ │ │ │ ├── body_rust/ │ │ │ │ │ │ ├── Cargo.toml │ │ │ │ │ │ └── src/ │ │ │ │ │ │ └── lib.rs │ │ │ │ │ ├── headers_rust/ │ │ │ │ │ │ ├── Cargo.toml │ │ │ │ │ │ └── src/ │ │ │ │ │ │ └── lib.rs │ │ │ │ │ ├── metadata_rust/ │ │ │ │ │ │ ├── Cargo.toml │ │ │ │ │ │ └── src/ │ │ │ │ │ │ └── lib.rs │ │ │ │ │ ├── shared_data_rust/ │ │ │ │ │ │ ├── Cargo.toml │ │ │ │ │ │ └── src/ │ │ │ │ │ │ └── lib.rs │ │ │ │ │ ├── shared_queue_rust/ │ │ │ │ │ │ ├── Cargo.toml │ │ │ │ │ │ └── src/ │ │ │ │ │ │ └── lib.rs │ │ │ │ │ ├── test.proto │ │ │ │ │ ├── test_async_call_cpp.cc │ │ 
│ │ │ ├── test_body_cpp.cc │ │ │ │ │ ├── test_cpp.cc │ │ │ │ │ ├── test_cpp_null_plugin.cc │ │ │ │ │ ├── test_grpc_call_cpp.cc │ │ │ │ │ ├── test_grpc_stream_cpp.cc │ │ │ │ │ ├── test_shared_data_cpp.cc │ │ │ │ │ └── test_shared_queue_cpp.cc │ │ │ │ └── wasm_filter_test.cc │ │ │ ├── listener/ │ │ │ │ ├── common/ │ │ │ │ │ └── fuzz/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── fuzzed_input_test.cc │ │ │ │ │ ├── listener_filter_fakes.cc │ │ │ │ │ ├── listener_filter_fakes.h │ │ │ │ │ ├── listener_filter_fuzzer.cc │ │ │ │ │ ├── listener_filter_fuzzer.h │ │ │ │ │ └── listener_filter_fuzzer.proto │ │ │ │ ├── http_inspector/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── http_inspector_config_test.cc │ │ │ │ │ ├── http_inspector_corpus/ │ │ │ │ │ │ ├── bad_header │ │ │ │ │ │ ├── incomplete_header │ │ │ │ │ │ ├── invalid_method │ │ │ │ │ │ ├── invalid_request │ │ │ │ │ │ ├── multiple_http10 │ │ │ │ │ │ ├── multiple_incomplete │ │ │ │ │ │ ├── valid_http10 │ │ │ │ │ │ ├── valid_http11 │ │ │ │ │ │ └── valid_http2 │ │ │ │ │ ├── http_inspector_fuzz_test.cc │ │ │ │ │ └── http_inspector_test.cc │ │ │ │ ├── original_dst/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── original_dst_corpus/ │ │ │ │ │ │ ├── invalid_scheme │ │ │ │ │ │ ├── invalid_unix │ │ │ │ │ │ ├── valid_ipv4 │ │ │ │ │ │ └── valid_ipv6 │ │ │ │ │ └── original_dst_fuzz_test.cc │ │ │ │ ├── original_src/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── original_src_config_factory_test.cc │ │ │ │ │ ├── original_src_corpus/ │ │ │ │ │ │ ├── valid_ipv4 │ │ │ │ │ │ └── valid_unix │ │ │ │ │ ├── original_src_fuzz_test.cc │ │ │ │ │ ├── original_src_fuzz_test.proto │ │ │ │ │ └── original_src_test.cc │ │ │ │ ├── proxy_protocol/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── proxy_protocol_corpus/ │ │ │ │ │ │ ├── v1_basic │ │ │ │ │ │ ├── v1_basic_v6 │ │ │ │ │ │ ├── v1_minimal │ │ │ │ │ │ ├── v2_basic │ │ │ │ │ │ ├── v2_basic_v6 │ │ │ │ │ │ ├── v2_extensions │ │ │ │ │ │ ├── v2_tlv │ │ │ │ │ │ └── v2_tlv_multiple │ │ │ │ │ ├── 
proxy_protocol_fuzz_test.cc │ │ │ │ │ ├── proxy_protocol_fuzz_test.proto │ │ │ │ │ └── proxy_protocol_test.cc │ │ │ │ └── tls_inspector/ │ │ │ │ ├── BUILD │ │ │ │ ├── tls_inspector_benchmark.cc │ │ │ │ ├── tls_inspector_corpus/ │ │ │ │ │ ├── multiple_reads │ │ │ │ │ ├── no_extensions │ │ │ │ │ ├── not_ssl │ │ │ │ │ ├── too_large │ │ │ │ │ ├── valid_alpn │ │ │ │ │ └── valid_sni │ │ │ │ ├── tls_inspector_fuzz_test.cc │ │ │ │ ├── tls_inspector_fuzz_test.proto │ │ │ │ ├── tls_inspector_test.cc │ │ │ │ ├── tls_utility.cc │ │ │ │ └── tls_utility.h │ │ │ ├── network/ │ │ │ │ ├── client_ssl_auth/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── client_ssl_auth_test.cc │ │ │ │ │ ├── config_test.cc │ │ │ │ │ └── test_data/ │ │ │ │ │ └── vpn_response_1.json │ │ │ │ ├── common/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── fuzz/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ ├── network_readfilter_corpus/ │ │ │ │ │ │ │ ├── client_sslL_auth_2 │ │ │ │ │ │ │ ├── client_ssl_authz_1 │ │ │ │ │ │ │ ├── direct_response_1 │ │ │ │ │ │ │ ├── direct_response_open_file │ │ │ │ │ │ │ ├── dubbo_proxy_1 │ │ │ │ │ │ │ ├── dubbo_proxy_ondata_msg_split │ │ │ │ │ │ │ ├── dubbo_proxy_ondata_twoway │ │ │ │ │ │ │ ├── dubbo_proxy_protocol_routing │ │ │ │ │ │ │ ├── dubbo_proxy_protocol_routing_failure │ │ │ │ │ │ │ ├── echo_1 │ │ │ │ │ │ │ ├── empty │ │ │ │ │ │ │ ├── ext_authz_1 │ │ │ │ │ │ │ ├── ext_authz_2 │ │ │ │ │ │ │ ├── http_connection_manager_1 │ │ │ │ │ │ │ ├── http_connection_manager_2 │ │ │ │ │ │ │ ├── kafka_1 │ │ │ │ │ │ │ ├── kafka_process_msg │ │ │ │ │ │ │ ├── kafka_request1 │ │ │ │ │ │ │ ├── kafka_unknown_request │ │ │ │ │ │ │ ├── local_ratelimit_1 │ │ │ │ │ │ │ ├── local_ratelimit_time_overflow │ │ │ │ │ │ │ ├── mongodb_proxy_1 │ │ │ │ │ │ │ ├── mongodb_proxy_2 │ │ │ │ │ │ │ ├── mysql_proxy_1 │ │ │ │ │ │ │ ├── oss_redis_proxy_stackoverflow_with_long_route_prefix │ │ │ │ │ │ │ ├── ratelimit_1 │ │ │ │ │ │ │ ├── rbac_1 │ │ │ │ │ │ │ ├── redis_proxy_1 │ │ │ │ │ │ │ ├── redis_proxy_1_auth_no_pwd_set │ │ │ 
│ │ │ │ ├── redis_proxy_1_auth_pwd_set │ │ │ │ │ │ │ ├── redis_proxy_1_bulk_string │ │ │ │ │ │ │ ├── redis_proxy_1_negative_large_integer │ │ │ │ │ │ │ ├── redis_proxy_1_nested_array │ │ │ │ │ │ │ ├── redis_proxy_1_null │ │ │ │ │ │ │ ├── rocketmq_proxy_crash │ │ │ │ │ │ │ ├── rocketmq_proxy_end_stream │ │ │ │ │ │ │ ├── rocketmq_proxy_invalid_header │ │ │ │ │ │ │ ├── rocketmq_proxy_on_ack_msg │ │ │ │ │ │ │ ├── rocketmq_proxy_on_get_topic_route │ │ │ │ │ │ │ ├── rocketmq_proxy_on_heartbeat │ │ │ │ │ │ │ ├── rocketmq_proxy_on_pop_msg │ │ │ │ │ │ │ ├── rocketmq_proxy_sendmsg │ │ │ │ │ │ │ ├── rocketmq_proxy_sendmsg2 │ │ │ │ │ │ │ ├── rocketmq_proxy_unregistered_client │ │ │ │ │ │ │ ├── sni_cluster_1 │ │ │ │ │ │ │ ├── sni_cluster_2 │ │ │ │ │ │ │ ├── sni_dynamic_forward_proxy_1 │ │ │ │ │ │ │ ├── thrift_proxy_1 │ │ │ │ │ │ │ ├── thrift_proxy_3 │ │ │ │ │ │ │ ├── thrift_proxy_app_exception │ │ │ │ │ │ │ ├── thrift_proxy_assert_failure │ │ │ │ │ │ │ ├── thrift_proxy_garbage_request │ │ │ │ │ │ │ ├── thrift_proxy_invalid_msg_type │ │ │ │ │ │ │ ├── thrift_proxy_on_data_handles_oneway │ │ │ │ │ │ │ ├── thrift_proxy_on_data_handles_thriftcall │ │ │ │ │ │ │ ├── thrift_proxy_pipelined_request1 │ │ │ │ │ │ │ ├── thrift_proxy_protocol_error │ │ │ │ │ │ │ ├── thrift_proxy_router_test │ │ │ │ │ │ │ ├── thrift_proxy_stop_and_resume │ │ │ │ │ │ │ ├── zookeeper_proxy_1 │ │ │ │ │ │ │ ├── zookeeper_proxy_auth │ │ │ │ │ │ │ ├── zookeeper_proxy_connect │ │ │ │ │ │ │ ├── zookeeper_proxy_multirequest │ │ │ │ │ │ │ ├── zookeeper_proxy_request_container │ │ │ │ │ │ │ ├── zookeeper_proxy_request_ephemeral │ │ │ │ │ │ │ ├── zookeeper_proxy_request_persistent │ │ │ │ │ │ │ ├── zookeeper_proxy_request_persistent_ephemeral_sequential │ │ │ │ │ │ │ ├── zookeeper_proxy_request_persistent_sequential │ │ │ │ │ │ │ ├── zookeeper_proxy_request_ttl │ │ │ │ │ │ │ ├── zookeeper_proxy_request_ttl_sequential │ │ │ │ │ │ │ └── zookeeper_proxy_watch_request │ │ │ │ │ │ ├── network_readfilter_fuzz.proto │ │ │ │ │ 
│ ├── network_readfilter_fuzz_test.cc │ │ │ │ │ │ ├── network_readfilter_fuzz_test.dict │ │ │ │ │ │ ├── network_writefilter_corpus/ │ │ │ │ │ │ │ ├── kafka_broker_1 │ │ │ │ │ │ │ ├── kafka_broker_process_response │ │ │ │ │ │ │ ├── kafka_broker_response1 │ │ │ │ │ │ │ ├── kafka_broker_unknown_response │ │ │ │ │ │ │ ├── mongodb_proxy_1 │ │ │ │ │ │ │ ├── mongodb_proxy_response │ │ │ │ │ │ │ ├── mysql_proxy_1 │ │ │ │ │ │ │ ├── mysql_proxy_msg_split │ │ │ │ │ │ │ ├── zookeeper_proxy_1 │ │ │ │ │ │ │ ├── zookeeper_proxy_assert_failure_onwrite │ │ │ │ │ │ │ ├── zookeeper_proxy_auth │ │ │ │ │ │ │ ├── zookeeper_proxy_connect │ │ │ │ │ │ │ ├── zookeeper_proxy_ping │ │ │ │ │ │ │ ├── zookeeper_proxy_watch_control │ │ │ │ │ │ │ └── zookeeper_proxy_watch_event │ │ │ │ │ │ ├── network_writefilter_fuzz.proto │ │ │ │ │ │ ├── network_writefilter_fuzz_test.cc │ │ │ │ │ │ ├── uber_per_readfilter.cc │ │ │ │ │ │ ├── uber_per_writefilter.cc │ │ │ │ │ │ ├── uber_readfilter.cc │ │ │ │ │ │ ├── uber_readfilter.h │ │ │ │ │ │ ├── uber_writefilter.cc │ │ │ │ │ │ ├── uber_writefilter.h │ │ │ │ │ │ └── utils/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ └── fakes.h │ │ │ │ │ ├── redis/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── client_impl_test.cc │ │ │ │ │ │ ├── codec_impl_test.cc │ │ │ │ │ │ ├── fault_test.cc │ │ │ │ │ │ ├── mocks.cc │ │ │ │ │ │ ├── mocks.h │ │ │ │ │ │ └── test_utils.h │ │ │ │ │ └── utility_test.cc │ │ │ │ ├── direct_response/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── direct_response_integration_test.cc │ │ │ │ │ └── direct_response_test.cc │ │ │ │ ├── dubbo_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── app_exception_test.cc │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── conn_manager_test.cc │ │ │ │ │ ├── decoder_test.cc │ │ │ │ │ ├── dubbo_hessian2_serializer_impl_test.cc │ │ │ │ │ ├── dubbo_protocol_impl_test.cc │ │ │ │ │ ├── hessian_utils_test.cc │ │ │ │ │ ├── metadata_test.cc │ │ │ │ │ ├── mocks.cc │ │ │ │ │ ├── mocks.h │ │ │ │ │ ├── route_matcher_test.cc │ │ │ │ │ ├── router_filter_config_test.cc │ │ │ │ │ 
├── router_test.cc │ │ │ │ │ └── utility.h │ │ │ │ ├── ext_authz/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── ext_authz_corpus/ │ │ │ │ │ │ ├── crash-309531f09ce8c0c71f272c7145da9d5528c3e8fc │ │ │ │ │ │ └── crash-72c994c40b30ff66b72f401055681e9851fea7a2 │ │ │ │ │ ├── ext_authz_fuzz.proto │ │ │ │ │ ├── ext_authz_fuzz_test.cc │ │ │ │ │ └── ext_authz_test.cc │ │ │ │ ├── http_connection_manager/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config.proto │ │ │ │ │ └── config_test.cc │ │ │ │ ├── kafka/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── broker/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── config_unit_test.cc │ │ │ │ │ │ ├── filter_protocol_test.cc │ │ │ │ │ │ ├── filter_unit_test.cc │ │ │ │ │ │ └── integration_test/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ ├── envoy_config_yaml.j2 │ │ │ │ │ │ ├── kafka_broker_integration_test.py │ │ │ │ │ │ ├── kafka_server_properties.j2 │ │ │ │ │ │ └── zookeeper_properties.j2 │ │ │ │ │ ├── buffer_based_test.h │ │ │ │ │ ├── kafka_request_parser_test.cc │ │ │ │ │ ├── kafka_response_parser_test.cc │ │ │ │ │ ├── message_utilities.h │ │ │ │ │ ├── metrics_integration_test.cc │ │ │ │ │ ├── protocol/ │ │ │ │ │ │ ├── launcher.py │ │ │ │ │ │ ├── request_codec_request_test_cc.j2 │ │ │ │ │ │ ├── request_utilities_cc.j2 │ │ │ │ │ │ ├── requests_test_cc.j2 │ │ │ │ │ │ ├── response_codec_response_test_cc.j2 │ │ │ │ │ │ ├── response_utilities_cc.j2 │ │ │ │ │ │ └── responses_test_cc.j2 │ │ │ │ │ ├── request_codec_integration_test.cc │ │ │ │ │ ├── request_codec_unit_test.cc │ │ │ │ │ ├── response_codec_integration_test.cc │ │ │ │ │ ├── response_codec_unit_test.cc │ │ │ │ │ ├── serialization/ │ │ │ │ │ │ ├── launcher.py │ │ │ │ │ │ └── serialization_composite_test_cc.j2 │ │ │ │ │ ├── serialization_test.cc │ │ │ │ │ ├── serialization_utilities.cc │ │ │ │ │ └── serialization_utilities.h │ │ │ │ ├── local_ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── local_ratelimit_corpus/ │ │ │ │ │ │ └── basic_test_case │ │ │ │ │ ├── 
local_ratelimit_fuzz.proto │ │ │ │ │ ├── local_ratelimit_fuzz_test.cc │ │ │ │ │ ├── local_ratelimit_integration_test.cc │ │ │ │ │ └── local_ratelimit_test.cc │ │ │ │ ├── mongo_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── bson_impl_test.cc │ │ │ │ │ ├── codec_impl_test.cc │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── proxy_test.cc │ │ │ │ │ └── utility_test.cc │ │ │ │ ├── mysql_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── mysql_codec_test.cc │ │ │ │ │ ├── mysql_command_test.cc │ │ │ │ │ ├── mysql_filter_test.cc │ │ │ │ │ ├── mysql_integration_test.cc │ │ │ │ │ ├── mysql_test_config.yaml │ │ │ │ │ ├── mysql_test_utils.cc │ │ │ │ │ └── mysql_test_utils.h │ │ │ │ ├── postgres_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── postgres_decoder_test.cc │ │ │ │ │ ├── postgres_filter_test.cc │ │ │ │ │ ├── postgres_integration_test.cc │ │ │ │ │ ├── postgres_message_test.cc │ │ │ │ │ ├── postgres_test_config.yaml │ │ │ │ │ ├── postgres_test_utils.cc │ │ │ │ │ └── postgres_test_utils.h │ │ │ │ ├── ratelimit/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ └── ratelimit_test.cc │ │ │ │ ├── rbac/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── filter_test.cc │ │ │ │ │ └── integration_test.cc │ │ │ │ ├── redis_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── command_lookup_speed_test.cc │ │ │ │ │ ├── command_split_speed_test.cc │ │ │ │ │ ├── command_splitter_impl_test.cc │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── conn_pool_impl_test.cc │ │ │ │ │ ├── mocks.cc │ │ │ │ │ ├── mocks.h │ │ │ │ │ ├── proxy_filter_test.cc │ │ │ │ │ ├── redis_proxy_integration_test.cc │ │ │ │ │ └── router_impl_test.cc │ │ │ │ ├── rocketmq_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── active_message_test.cc │ │ │ │ │ ├── codec_test.cc │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── conn_manager_test.cc │ │ │ │ │ ├── mocks.cc │ │ │ │ │ ├── mocks.h │ │ │ │ │ ├── protocol_test.cc │ │ │ │ │ ├── route_matcher_test.cc │ │ │ │ │ ├── router_test.cc │ │ │ │ │ ├── topic_route_test.cc │ │ │ │ │ ├── utility.cc │ │ │ │ │ └── 
utility.h │ │ │ │ ├── sni_cluster/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── sni_cluster_test.cc │ │ │ │ ├── sni_dynamic_forward_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── proxy_filter_integration_test.cc │ │ │ │ │ └── proxy_filter_test.cc │ │ │ │ ├── tcp_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config_test.cc │ │ │ │ ├── thrift_proxy/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── app_exception_impl_test.cc │ │ │ │ │ ├── auto_protocol_impl_test.cc │ │ │ │ │ ├── auto_transport_impl_test.cc │ │ │ │ │ ├── binary_protocol_impl_test.cc │ │ │ │ │ ├── buffer_helper_test.cc │ │ │ │ │ ├── compact_protocol_impl_test.cc │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── conn_manager_test.cc │ │ │ │ │ ├── conn_state_test.cc │ │ │ │ │ ├── decoder_test.cc │ │ │ │ │ ├── driver/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── README.md │ │ │ │ │ │ ├── client.py │ │ │ │ │ │ ├── example.thrift │ │ │ │ │ │ ├── fbthrift/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ ├── THeaderTransport.py │ │ │ │ │ │ │ └── __init__.py │ │ │ │ │ │ ├── finagle/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ ├── TFinagleServerProcessor.py │ │ │ │ │ │ │ ├── TFinagleServerProtocol.py │ │ │ │ │ │ │ └── __init__.py │ │ │ │ │ │ ├── generate_bindings.sh │ │ │ │ │ │ ├── generate_fixture.sh │ │ │ │ │ │ ├── generated/ │ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ │ └── example/ │ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ │ ├── Example-remote │ │ │ │ │ │ │ ├── Example.py │ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ │ ├── constants.py │ │ │ │ │ │ │ └── ttypes.py │ │ │ │ │ │ └── server.py │ │ │ │ │ ├── filters/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── pass_through_filter_test.cc │ │ │ │ │ │ └── ratelimit/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── config_test.cc │ │ │ │ │ │ └── ratelimit_test.cc │ │ │ │ │ ├── framed_transport_impl_test.cc │ │ │ │ │ ├── header_transport_impl_test.cc │ │ │ │ │ ├── integration.cc │ │ │ │ │ ├── integration.h │ │ │ │ │ ├── integration_test.cc │ │ │ │ │ ├── metadata_test.cc │ │ │ │ │ ├── mocks.cc │ │ │ │ │ ├── mocks.h │ │ │ │ │ ├── requirements.txt │ │ │ │ │ ├── 
route_matcher_test.cc │ │ │ │ │ ├── router_ratelimit_test.cc │ │ │ │ │ ├── router_test.cc │ │ │ │ │ ├── thrift_object_impl_test.cc │ │ │ │ │ ├── translation_integration_test.cc │ │ │ │ │ ├── twitter_protocol_impl_test.cc │ │ │ │ │ ├── unframed_transport_impl_test.cc │ │ │ │ │ └── utility.h │ │ │ │ ├── wasm/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── config_test.cc │ │ │ │ │ ├── test_data/ │ │ │ │ │ │ ├── BUILD │ │ │ │ │ │ ├── logging_rust/ │ │ │ │ │ │ │ ├── Cargo.toml │ │ │ │ │ │ │ └── src/ │ │ │ │ │ │ │ └── lib.rs │ │ │ │ │ │ ├── test_cpp.cc │ │ │ │ │ │ └── test_cpp_null_plugin.cc │ │ │ │ │ └── wasm_filter_test.cc │ │ │ │ └── zookeeper_proxy/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ └── filter_test.cc │ │ │ └── udp/ │ │ │ ├── dns_filter/ │ │ │ │ ├── BUILD │ │ │ │ ├── dns_filter_corpus/ │ │ │ │ │ ├── 016fac1e4a40199b26b08df73179f9249e6a680b │ │ │ │ │ ├── 110be4738f0cc29218ba95bd16a1442b57b3caaf │ │ │ │ │ ├── 3d6e5d07c6ce8f5996f0ac001fed89f0962e50d7 │ │ │ │ │ ├── 497a3f29c3a53a65853a9e0ab3dd315fb92ac025 │ │ │ │ │ ├── 7c09f450b6667337fd111fad0049bf4601c1aece │ │ │ │ │ ├── 88c22fd07c15d34576b085cb3e869e5da9b23b3f │ │ │ │ │ ├── 95bcb3090cb222d80fa4fee7b88e84b99ae408b1 │ │ │ │ │ ├── e9c8cd789e907d07e56e7e4d998e3de6c0550b9d │ │ │ │ │ ├── f1220105b4e868a7ce4d908eefbec7f403e5ddb8 │ │ │ │ │ └── fb9282f0af3341cfc98d56f10fffffd5529d8802 │ │ │ │ ├── dns_filter_fuzz_test.cc │ │ │ │ ├── dns_filter_integration_test.cc │ │ │ │ ├── dns_filter_test.cc │ │ │ │ ├── dns_filter_test_utils.cc │ │ │ │ ├── dns_filter_test_utils.h │ │ │ │ └── dns_filter_utils_test.cc │ │ │ └── udp_proxy/ │ │ │ ├── BUILD │ │ │ ├── hash_policy_impl_test.cc │ │ │ ├── udp_proxy_filter_test.cc │ │ │ └── udp_proxy_integration_test.cc │ │ ├── grpc_credentials/ │ │ │ ├── aws_iam/ │ │ │ │ ├── BUILD │ │ │ │ └── aws_iam_grpc_credentials_test.cc │ │ │ └── file_based_metadata/ │ │ │ ├── BUILD │ │ │ └── file_based_metadata_grpc_credentials_test.cc │ │ ├── health_checkers/ │ │ │ └── redis/ │ │ │ ├── BUILD │ │ │ ├── 
config_test.cc │ │ │ └── redis_test.cc │ │ ├── internal_redirect/ │ │ │ └── previous_routes/ │ │ │ ├── BUILD │ │ │ └── config_test.cc │ │ ├── quic_listeners/ │ │ │ └── quiche/ │ │ │ ├── BUILD │ │ │ ├── active_quic_listener_config_test.cc │ │ │ ├── active_quic_listener_test.cc │ │ │ ├── crypto_test_utils_for_envoy.cc │ │ │ ├── envoy_quic_alarm_test.cc │ │ │ ├── envoy_quic_client_session_test.cc │ │ │ ├── envoy_quic_client_stream_test.cc │ │ │ ├── envoy_quic_dispatcher_test.cc │ │ │ ├── envoy_quic_proof_source_test.cc │ │ │ ├── envoy_quic_proof_verifier_test.cc │ │ │ ├── envoy_quic_server_session_test.cc │ │ │ ├── envoy_quic_server_stream_test.cc │ │ │ ├── envoy_quic_simulated_watermark_buffer_test.cc │ │ │ ├── envoy_quic_utils_test.cc │ │ │ ├── envoy_quic_writer_test.cc │ │ │ ├── integration/ │ │ │ │ ├── BUILD │ │ │ │ └── quic_http_integration_test.cc │ │ │ ├── platform/ │ │ │ │ ├── BUILD │ │ │ │ ├── envoy_quic_clock_test.cc │ │ │ │ ├── epoll_address_test_utils_impl.h │ │ │ │ ├── epoll_bug_impl.h │ │ │ │ ├── epoll_expect_bug_impl.h │ │ │ │ ├── epoll_export_impl.h │ │ │ │ ├── epoll_logging_impl.h │ │ │ │ ├── epoll_ptr_util_impl.h │ │ │ │ ├── epoll_test_impl.h │ │ │ │ ├── epoll_thread_impl.h │ │ │ │ ├── epoll_time_impl.h │ │ │ │ ├── http2_platform_test.cc │ │ │ │ ├── quic_epoll_clock.cc │ │ │ │ ├── quic_epoll_clock.h │ │ │ │ ├── quic_epoll_impl.h │ │ │ │ ├── quic_expect_bug_impl.h │ │ │ │ ├── quic_mock_log_impl.h │ │ │ │ ├── quic_platform_test.cc │ │ │ │ ├── quic_port_utils_impl.cc │ │ │ │ ├── quic_port_utils_impl.h │ │ │ │ ├── quic_sleep_impl.h │ │ │ │ ├── quic_system_event_loop_impl.h │ │ │ │ ├── quic_test_impl.h │ │ │ │ ├── quic_test_mem_slice_vector_impl.h │ │ │ │ ├── quic_test_output_impl.cc │ │ │ │ ├── quic_test_output_impl.h │ │ │ │ ├── quic_thread_impl.h │ │ │ │ ├── quiche_platform_test.cc │ │ │ │ ├── quiche_test_impl.h │ │ │ │ ├── spdy_platform_test.cc │ │ │ │ └── spdy_test_helpers_impl.h │ │ │ ├── quic_io_handle_wrapper_test.cc │ │ │ ├── test_proof_source.h 
│ │ │ ├── test_proof_verifier.h │ │ │ └── test_utils.h │ │ ├── resource_monitors/ │ │ │ ├── fixed_heap/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ └── fixed_heap_monitor_test.cc │ │ │ └── injected_resource/ │ │ │ ├── BUILD │ │ │ ├── config_test.cc │ │ │ └── injected_resource_monitor_test.cc │ │ ├── retry/ │ │ │ ├── host/ │ │ │ │ ├── omit_canary_hosts/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config_test.cc │ │ │ │ ├── omit_host_metadata/ │ │ │ │ │ ├── BUILD │ │ │ │ │ └── config_test.cc │ │ │ │ └── previous_hosts/ │ │ │ │ ├── BUILD │ │ │ │ └── config_test.cc │ │ │ └── priority/ │ │ │ └── previous_priorities/ │ │ │ ├── BUILD │ │ │ └── config_test.cc │ │ ├── stats_sinks/ │ │ │ ├── common/ │ │ │ │ └── statsd/ │ │ │ │ ├── BUILD │ │ │ │ ├── statsd_test.cc │ │ │ │ └── udp_statsd_test.cc │ │ │ ├── dog_statsd/ │ │ │ │ ├── BUILD │ │ │ │ └── config_test.cc │ │ │ ├── hystrix/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ ├── hystrix_integration_test.cc │ │ │ │ └── hystrix_test.cc │ │ │ ├── metrics_service/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ ├── grpc_metrics_service_impl_test.cc │ │ │ │ └── metrics_service_integration_test.cc │ │ │ ├── statsd/ │ │ │ │ ├── BUILD │ │ │ │ └── config_test.cc │ │ │ └── wasm/ │ │ │ ├── BUILD │ │ │ ├── config_test.cc │ │ │ ├── test_data/ │ │ │ │ ├── BUILD │ │ │ │ ├── test_context_cpp.cc │ │ │ │ └── test_context_cpp_null_plugin.cc │ │ │ └── wasm_stat_sink_test.cc │ │ ├── tracers/ │ │ │ ├── common/ │ │ │ │ └── ot/ │ │ │ │ ├── BUILD │ │ │ │ └── opentracing_driver_impl_test.cc │ │ │ ├── datadog/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ └── datadog_tracer_impl_test.cc │ │ │ ├── dynamic_ot/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ └── dynamic_opentracing_driver_impl_test.cc │ │ │ ├── lightstep/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ └── lightstep_tracer_impl_test.cc │ │ │ ├── opencensus/ │ │ │ │ ├── BUILD │ │ │ │ ├── config_test.cc │ │ │ │ └── tracer_test.cc │ │ │ ├── xray/ │ │ │ │ ├── BUILD 
│ │ │ │ ├── config_test.cc │ │ │ │ ├── fuzz_test.cc │ │ │ │ ├── localized_sampling_test.cc │ │ │ │ ├── tracer_test.cc │ │ │ │ ├── util_test.cc │ │ │ │ ├── wildcard_matcher_corpus/ │ │ │ │ │ └── example │ │ │ │ └── xray_tracer_impl_test.cc │ │ │ └── zipkin/ │ │ │ ├── BUILD │ │ │ ├── config_test.cc │ │ │ ├── span_buffer_test.cc │ │ │ ├── span_context_extractor_test.cc │ │ │ ├── tracer_test.cc │ │ │ ├── zipkin_core_types_test.cc │ │ │ └── zipkin_tracer_impl_test.cc │ │ ├── transport_sockets/ │ │ │ ├── alts/ │ │ │ │ ├── BUILD │ │ │ │ ├── alts_integration_test.cc │ │ │ │ ├── config_test.cc │ │ │ │ ├── noop_transport_socket_callbacks_test.cc │ │ │ │ ├── tsi_frame_protector_test.cc │ │ │ │ ├── tsi_handshaker_test.cc │ │ │ │ └── tsi_socket_test.cc │ │ │ ├── common/ │ │ │ │ ├── BUILD │ │ │ │ └── passthrough_test.cc │ │ │ ├── proxy_protocol/ │ │ │ │ ├── BUILD │ │ │ │ ├── proxy_protocol_integration_test.cc │ │ │ │ └── proxy_protocol_test.cc │ │ │ ├── tap/ │ │ │ │ ├── BUILD │ │ │ │ └── tap_config_impl_test.cc │ │ │ └── tls/ │ │ │ ├── BUILD │ │ │ ├── context_impl_test.cc │ │ │ ├── gen_unittest_certs.sh │ │ │ ├── handshaker_test.cc │ │ │ ├── integration/ │ │ │ │ ├── BUILD │ │ │ │ ├── ssl_integration_test.cc │ │ │ │ └── ssl_integration_test.h │ │ │ ├── io_handle_bio_test.cc │ │ │ ├── ocsp/ │ │ │ │ ├── BUILD │ │ │ │ ├── asn1_utility_test.cc │ │ │ │ ├── gen_unittest_ocsp_data.sh │ │ │ │ └── ocsp_test.cc │ │ │ ├── ssl_certs_test.h │ │ │ ├── ssl_socket_test.cc │ │ │ ├── ssl_test_utility.h │ │ │ ├── test_data/ │ │ │ │ ├── BUILD │ │ │ │ ├── README.md │ │ │ │ ├── aes_128_key │ │ │ │ ├── ca_cert.cfg │ │ │ │ ├── ca_cert.crl │ │ │ │ ├── ca_cert.pem │ │ │ │ ├── ca_cert_info.h │ │ │ │ ├── ca_cert_with_crl.pem │ │ │ │ ├── ca_certificates.pem │ │ │ │ ├── ca_key.pem │ │ │ │ ├── certs.sh │ │ │ │ ├── expired_cert.pem │ │ │ │ ├── expired_cert_info.h │ │ │ │ ├── expired_key.pem │ │ │ │ ├── expired_san_uri_cert.pem │ │ │ │ ├── expired_san_uri_cert_info.h │ │ │ │ ├── expired_san_uri_key.pem │ │ │ │ 
├── extensions_cert.cfg │ │ │ │ ├── extensions_cert.pem │ │ │ │ ├── extensions_cert_info.h │ │ │ │ ├── extensions_key.pem │ │ │ │ ├── fake_ca_cert.cfg │ │ │ │ ├── fake_ca_cert.pem │ │ │ │ ├── fake_ca_cert_info.h │ │ │ │ ├── fake_ca_key.pem │ │ │ │ ├── intermediate_ca_cert.cfg │ │ │ │ ├── intermediate_ca_cert.crl │ │ │ │ ├── intermediate_ca_cert.pem │ │ │ │ ├── intermediate_ca_cert_chain.crl │ │ │ │ ├── intermediate_ca_cert_chain.pem │ │ │ │ ├── intermediate_ca_cert_chain_with_crl.pem │ │ │ │ ├── intermediate_ca_cert_chain_with_crl_chain.pem │ │ │ │ ├── intermediate_ca_cert_info.h │ │ │ │ ├── intermediate_ca_key.pem │ │ │ │ ├── long_validity_cert.pem │ │ │ │ ├── long_validity_cert_info.h │ │ │ │ ├── long_validity_key.pem │ │ │ │ ├── no_san_cert.cfg │ │ │ │ ├── no_san_cert.pem │ │ │ │ ├── no_san_cert_info.h │ │ │ │ ├── no_san_chain.pem │ │ │ │ ├── no_san_key.pem │ │ │ │ ├── no_subject_cert.cfg │ │ │ │ ├── no_subject_cert.pem │ │ │ │ ├── no_subject_cert_info.h │ │ │ │ ├── no_subject_key.pem │ │ │ │ ├── not_a_crl.crl │ │ │ │ ├── password_protected_cert.pem │ │ │ │ ├── password_protected_cert_info.h │ │ │ │ ├── password_protected_key.pem │ │ │ │ ├── password_protected_password.txt │ │ │ │ ├── san_dns2_cert.pem │ │ │ │ ├── san_dns2_cert_info.h │ │ │ │ ├── san_dns2_key.pem │ │ │ │ ├── san_dns3_cert.pem │ │ │ │ ├── san_dns3_cert_info.h │ │ │ │ ├── san_dns3_chain.pem │ │ │ │ ├── san_dns3_key.pem │ │ │ │ ├── san_dns4_cert.pem │ │ │ │ ├── san_dns4_cert_info.h │ │ │ │ ├── san_dns4_key.pem │ │ │ │ ├── san_dns_cert.cfg │ │ │ │ ├── san_dns_cert.pem │ │ │ │ ├── san_dns_cert_info.h │ │ │ │ ├── san_dns_key.pem │ │ │ │ ├── san_ip_cert.cfg │ │ │ │ ├── san_ip_cert.pem │ │ │ │ ├── san_ip_cert_info.h │ │ │ │ ├── san_ip_chain.pem │ │ │ │ ├── san_ip_key.pem │ │ │ │ ├── san_multiple_dns_cert.cfg │ │ │ │ ├── san_multiple_dns_cert.pem │ │ │ │ ├── san_multiple_dns_cert_info.h │ │ │ │ ├── san_multiple_dns_key.pem │ │ │ │ ├── san_only_dns_cert.cfg │ │ │ │ ├── san_only_dns_cert.pem │ │ │ │ ├── 
san_only_dns_cert_info.h │ │ │ │ ├── san_only_dns_key.pem │ │ │ │ ├── san_uri_cert.cfg │ │ │ │ ├── san_uri_cert.pem │ │ │ │ ├── san_uri_cert_info.h │ │ │ │ ├── san_uri_key.pem │ │ │ │ ├── selfsigned2_cert.pem │ │ │ │ ├── selfsigned2_cert_info.h │ │ │ │ ├── selfsigned2_ecdsa_p256_cert.pem │ │ │ │ ├── selfsigned2_ecdsa_p256_cert_info.h │ │ │ │ ├── selfsigned_cert.cfg │ │ │ │ ├── selfsigned_cert.pem │ │ │ │ ├── selfsigned_cert_info.h │ │ │ │ ├── selfsigned_ecdsa_p256_cert.pem │ │ │ │ ├── selfsigned_ecdsa_p256_cert_info.h │ │ │ │ ├── selfsigned_ecdsa_p256_key.pem │ │ │ │ ├── selfsigned_ecdsa_p384_cert.pem │ │ │ │ ├── selfsigned_ecdsa_p384_cert_info.h │ │ │ │ ├── selfsigned_ecdsa_p384_key.pem │ │ │ │ ├── selfsigned_key.pem │ │ │ │ ├── selfsigned_rsa_1024_cert.pem │ │ │ │ ├── selfsigned_rsa_1024_cert_info.h │ │ │ │ ├── selfsigned_rsa_1024_key.pem │ │ │ │ ├── selfsigned_rsa_3072_cert.pem │ │ │ │ ├── selfsigned_rsa_3072_cert_info.h │ │ │ │ ├── selfsigned_rsa_3072_key.pem │ │ │ │ ├── selfsigned_rsa_4096_cert.pem │ │ │ │ ├── selfsigned_rsa_4096_cert_info.h │ │ │ │ ├── selfsigned_rsa_4096_key.pem │ │ │ │ ├── ticket_key_a │ │ │ │ ├── ticket_key_b │ │ │ │ └── ticket_key_wrong_len │ │ │ ├── test_private_key_method_provider.cc │ │ │ ├── test_private_key_method_provider.h │ │ │ └── utility_test.cc │ │ ├── upstreams/ │ │ │ └── http/ │ │ │ └── tcp/ │ │ │ ├── BUILD │ │ │ └── upstream_request_test.cc │ │ └── watchdog/ │ │ ├── abort_action/ │ │ │ ├── BUILD │ │ │ ├── abort_action_test.cc │ │ │ └── config_test.cc │ │ └── profile_action/ │ │ ├── BUILD │ │ ├── config_test.cc │ │ └── profile_action_test.cc │ ├── fuzz/ │ │ ├── BUILD │ │ ├── README.md │ │ ├── common.proto │ │ ├── fuzz_runner.cc │ │ ├── fuzz_runner.h │ │ ├── headers.dict │ │ ├── main.cc │ │ └── utility.h │ ├── integration/ │ │ ├── BUILD │ │ ├── README.md │ │ ├── ads_integration.cc │ │ ├── ads_integration.h │ │ ├── ads_integration_test.cc │ │ ├── alpn_selection_integration_test.cc │ │ ├── api_listener_integration_test.cc │ │ 
├── api_version_integration_test.cc │ │ ├── autonomous_upstream.cc │ │ ├── autonomous_upstream.h │ │ ├── base_integration_test.cc │ │ ├── base_integration_test.h │ │ ├── capture_fuzz.proto │ │ ├── capture_fuzz_gen.py │ │ ├── cds_integration_test.cc │ │ ├── cluster_filter_integration_test.cc │ │ ├── clusters/ │ │ │ ├── BUILD │ │ │ ├── cluster_factory_config.proto │ │ │ ├── custom_static_cluster.cc │ │ │ └── custom_static_cluster.h │ │ ├── custom_cluster_integration_test.cc │ │ ├── cx_limit_integration_test.cc │ │ ├── drain_close_integration_test.cc │ │ ├── dynamic_validation_integration_test.cc │ │ ├── echo_integration_test.cc │ │ ├── eds_integration_test.cc │ │ ├── extension_discovery_integration_test.cc │ │ ├── fake_upstream.cc │ │ ├── fake_upstream.h │ │ ├── filter_manager_integration_test.cc │ │ ├── filter_manager_integration_test.proto │ │ ├── filters/ │ │ │ ├── BUILD │ │ │ ├── add_body_filter.cc │ │ │ ├── add_trailers_filter.cc │ │ │ ├── backpressure_filter.cc │ │ │ ├── call_decodedata_once_filter.cc │ │ │ ├── clear_route_cache_filter.cc │ │ │ ├── common.h │ │ │ ├── continue_headers_only_inject_body_filter.cc │ │ │ ├── decode_headers_return_stop_all_filter.cc │ │ │ ├── eds_ready_filter.cc │ │ │ ├── encode_headers_return_stop_all_filter.cc │ │ │ ├── encoder_decoder_buffer_filter.cc │ │ │ ├── headers_only_filter.cc │ │ │ ├── local_reply_during_encoding_filter.cc │ │ │ ├── metadata_stop_all_filter.cc │ │ │ ├── modify_buffer_filter.cc │ │ │ ├── passthrough_filter.cc │ │ │ ├── pause_filter.cc │ │ │ ├── process_context_filter.cc │ │ │ ├── process_context_filter.h │ │ │ ├── random_pause_filter.cc │ │ │ ├── request_metadata_filter.cc │ │ │ ├── response_metadata_filter.cc │ │ │ ├── set_response_code_filter.cc │ │ │ ├── set_response_code_filter_config.proto │ │ │ ├── stop_iteration_and_continue_filter.cc │ │ │ ├── test_socket_interface.cc │ │ │ ├── test_socket_interface.h │ │ │ └── wait_for_whole_request_and_response.cc │ │ ├── h1_capture_direct_response_fuzz_test.cc │ 
│ ├── h1_capture_fuzz_test.cc │ │ ├── h1_corpus/ │ │ │ ├── BadPath.pb_text │ │ │ ├── EnvoyHandling100Continue.pb_text │ │ │ ├── InvalidContentLength.pb_text │ │ │ ├── NoHost.pb_text │ │ │ ├── RouterDownstreamDisconnectBeforeRequestComplete.pb_text │ │ │ ├── RouterDownstreamDisconnectBeforeResponseComplete.pb_text │ │ │ ├── RouterHeaderOnlyRequestAndResponseNoBuffer.pb_text │ │ │ ├── RouterRequestAndResponseWithBodyNoBuffer.pb_text │ │ │ ├── RouterUpstreamDisconnectBeforeRequestcomplete.pb_text │ │ │ ├── RouterUpstreamDisconnectBeforeResponseComplete.pb_text │ │ │ ├── RouterUpstreamResponseBeforeRequestComplete.pb_text │ │ │ ├── clusterfuzz-testcase-h1_capture_fuzz_test-5696503594090496 │ │ │ ├── clusterfuzz-testcase-h1_capture_fuzz_test-6215556767154176 │ │ │ ├── clusterfuzz-testcase-minimized-h1_capture_fuzz_test-5675304995782656 │ │ │ ├── clusterfuzz-testcase-minimized-h1_capture_fuzz_test-5738507290542080 │ │ │ ├── embed_null.pb_text │ │ │ ├── stream_info_destructor │ │ │ └── upstream_extra_crlf.pb_text │ │ ├── h1_fuzz.cc │ │ ├── h1_fuzz.h │ │ ├── h2_capture_direct_response_fuzz_test.cc │ │ ├── h2_capture_fuzz.proto │ │ ├── h2_capture_fuzz_test.cc │ │ ├── h2_corpus/ │ │ │ ├── metadata_test │ │ │ └── simple_test │ │ ├── h2_fuzz.cc │ │ ├── h2_fuzz.h │ │ ├── hds_integration_test.cc │ │ ├── header_casing_integration_test.cc │ │ ├── header_integration_test.cc │ │ ├── header_prefix_integration_test.cc │ │ ├── health_check_integration_test.cc │ │ ├── hotrestart_main.cc │ │ ├── hotrestart_test.sh │ │ ├── http2_integration_test.cc │ │ ├── http2_integration_test.h │ │ ├── http2_upstream_integration_test.cc │ │ ├── http2_upstream_integration_test.h │ │ ├── http_integration.cc │ │ ├── http_integration.h │ │ ├── http_protocol_integration.cc │ │ ├── http_protocol_integration.h │ │ ├── http_subset_lb_integration_test.cc │ │ ├── http_timeout_integration_test.cc │ │ ├── http_timeout_integration_test.h │ │ ├── idle_timeout_integration_test.cc │ │ ├── integration.h │ │ ├── 
integration_admin_test.cc │ │ ├── integration_admin_test.h │ │ ├── integration_stream_decoder.cc │ │ ├── integration_stream_decoder.h │ │ ├── integration_tcp_client.cc │ │ ├── integration_tcp_client.h │ │ ├── integration_test.cc │ │ ├── integration_test.h │ │ ├── listener_filter_integration_test.cc │ │ ├── listener_lds_integration_test.cc │ │ ├── load_stats_integration_test.cc │ │ ├── local_reply_integration_test.cc │ │ ├── overload_integration_test.cc │ │ ├── protocol_integration_test.cc │ │ ├── proxy_proto_integration_test.cc │ │ ├── proxy_proto_integration_test.h │ │ ├── redirect_integration_test.cc │ │ ├── rtds_integration_test.cc │ │ ├── run_envoy_test.sh │ │ ├── scoped_rds_integration_test.cc │ │ ├── sds_dynamic_integration_test.cc │ │ ├── sds_generic_secret_integration_test.cc │ │ ├── sds_static_integration_test.cc │ │ ├── server.cc │ │ ├── server.h │ │ ├── server_stats.h │ │ ├── socket_interface_integration_test.cc │ │ ├── ssl_utility.cc │ │ ├── ssl_utility.h │ │ ├── stats_integration_test.cc │ │ ├── tcp_conn_pool_integration_test.cc │ │ ├── tcp_dump.cc │ │ ├── tcp_dump.h │ │ ├── tcp_proxy_integration_test.cc │ │ ├── tcp_proxy_integration_test.h │ │ ├── tcp_proxy_integration_test.proto │ │ ├── tcp_tunneling_integration_test.cc │ │ ├── test_host_predicate.h │ │ ├── test_host_predicate_config.h │ │ ├── test_utility.sh │ │ ├── transport_socket_match_integration_test.cc │ │ ├── uds_integration_test.cc │ │ ├── uds_integration_test.h │ │ ├── utility.cc │ │ ├── utility.h │ │ ├── version_integration_test.cc │ │ ├── vhds_integration_test.cc │ │ ├── websocket_integration_test.cc │ │ ├── websocket_integration_test.h │ │ ├── xds_integration_test.cc │ │ ├── xfcc_integration_test.cc │ │ └── xfcc_integration_test.h │ ├── main.cc │ ├── mocks/ │ │ ├── BUILD │ │ ├── access_log/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── api/ │ │ │ ├── BUILD │ │ │ ├── hot_restart.h │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── buffer/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ 
└── mocks.h │ │ ├── common.cc │ │ ├── common.h │ │ ├── compression/ │ │ │ ├── compressor/ │ │ │ │ ├── BUILD │ │ │ │ ├── mocks.cc │ │ │ │ └── mocks.h │ │ │ └── decompressor/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── config/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── event/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ ├── mocks.h │ │ │ └── wrapped_dispatcher.h │ │ ├── filesystem/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── grpc/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── http/ │ │ │ ├── BUILD │ │ │ ├── api_listener.cc │ │ │ ├── api_listener.h │ │ │ ├── conn_pool.cc │ │ │ ├── conn_pool.h │ │ │ ├── mocks.cc │ │ │ ├── mocks.h │ │ │ ├── mocks_test.cc │ │ │ ├── stream.cc │ │ │ ├── stream.h │ │ │ ├── stream_decoder.cc │ │ │ ├── stream_decoder.h │ │ │ ├── stream_encoder.cc │ │ │ └── stream_encoder.h │ │ ├── init/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── local_info/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── local_reply/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── network/ │ │ │ ├── BUILD │ │ │ ├── connection.cc │ │ │ ├── connection.h │ │ │ ├── io_handle.cc │ │ │ ├── io_handle.h │ │ │ ├── mocks.cc │ │ │ ├── mocks.h │ │ │ ├── socket.cc │ │ │ ├── socket.h │ │ │ ├── transport_socket.cc │ │ │ └── transport_socket.h │ │ ├── protobuf/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── ratelimit/ │ │ │ ├── BUILD │ │ │ └── mocks.h │ │ ├── redis/ │ │ │ └── BUILD │ │ ├── router/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ ├── mocks.h │ │ │ ├── router_filter_interface.cc │ │ │ └── router_filter_interface.h │ │ ├── runtime/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── secret/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── server/ │ │ │ ├── BUILD │ │ │ ├── admin.cc │ │ │ ├── admin.h │ │ │ ├── admin_stream.cc │ │ │ ├── admin_stream.h │ │ │ ├── bootstrap_extension_factory.cc │ │ │ ├── bootstrap_extension_factory.h │ │ │ ├── 
config_tracker.cc │ │ │ ├── config_tracker.h │ │ │ ├── drain_manager.cc │ │ │ ├── drain_manager.h │ │ │ ├── factory_context.cc │ │ │ ├── factory_context.h │ │ │ ├── filter_chain_factory_context.cc │ │ │ ├── filter_chain_factory_context.h │ │ │ ├── guard_dog.cc │ │ │ ├── guard_dog.h │ │ │ ├── health_checker_factory_context.cc │ │ │ ├── health_checker_factory_context.h │ │ │ ├── hot_restart.cc │ │ │ ├── hot_restart.h │ │ │ ├── instance.cc │ │ │ ├── instance.h │ │ │ ├── listener_component_factory.cc │ │ │ ├── listener_component_factory.h │ │ │ ├── listener_factory_context.cc │ │ │ ├── listener_factory_context.h │ │ │ ├── listener_manager.cc │ │ │ ├── listener_manager.h │ │ │ ├── main.h │ │ │ ├── mocks.h │ │ │ ├── options.cc │ │ │ ├── options.h │ │ │ ├── overload_manager.cc │ │ │ ├── overload_manager.h │ │ │ ├── server_lifecycle_notifier.cc │ │ │ ├── server_lifecycle_notifier.h │ │ │ ├── tracer_factory.cc │ │ │ ├── tracer_factory.h │ │ │ ├── tracer_factory_context.cc │ │ │ ├── tracer_factory_context.h │ │ │ ├── transport_socket_factory_context.cc │ │ │ ├── transport_socket_factory_context.h │ │ │ ├── watch_dog.cc │ │ │ ├── watch_dog.h │ │ │ ├── watchdog_config.cc │ │ │ ├── watchdog_config.h │ │ │ ├── worker.cc │ │ │ ├── worker.h │ │ │ ├── worker_factory.cc │ │ │ └── worker_factory.h │ │ ├── ssl/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── stats/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── stream_info/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── tcp/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── thread_local/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ ├── tracing/ │ │ │ ├── BUILD │ │ │ ├── mocks.cc │ │ │ └── mocks.h │ │ └── upstream/ │ │ ├── BUILD │ │ ├── basic_resource_limit.cc │ │ ├── basic_resource_limit.h │ │ ├── cds_api.cc │ │ ├── cds_api.h │ │ ├── cluster.cc │ │ ├── cluster.h │ │ ├── cluster_info.cc │ │ ├── cluster_info.h │ │ ├── cluster_info_factory.cc │ │ ├── cluster_info_factory.h │ 
│ ├── cluster_manager.cc │ │ ├── cluster_manager.h │ │ ├── cluster_manager_factory.cc │ │ ├── cluster_manager_factory.h │ │ ├── cluster_priority_set.cc │ │ ├── cluster_priority_set.h │ │ ├── cluster_real_priority_set.cc │ │ ├── cluster_real_priority_set.h │ │ ├── cluster_update_callbacks.cc │ │ ├── cluster_update_callbacks.h │ │ ├── cluster_update_callbacks_handle.cc │ │ ├── cluster_update_callbacks_handle.h │ │ ├── health_check_event_logger.h │ │ ├── health_checker.cc │ │ ├── health_checker.h │ │ ├── host.cc │ │ ├── host.h │ │ ├── host_set.cc │ │ ├── host_set.h │ │ ├── load_balancer.cc │ │ ├── load_balancer.h │ │ ├── load_balancer_context.cc │ │ ├── load_balancer_context.h │ │ ├── mocks.h │ │ ├── priority_set.cc │ │ ├── priority_set.h │ │ ├── retry_host_predicate.cc │ │ ├── retry_host_predicate.h │ │ ├── retry_priority.cc │ │ ├── retry_priority.h │ │ ├── retry_priority_factory.h │ │ ├── test_retry_host_predicate_factory.h │ │ ├── thread_aware_load_balancer.cc │ │ ├── thread_aware_load_balancer.h │ │ ├── thread_local_cluster.cc │ │ ├── thread_local_cluster.h │ │ ├── transport_socket_match.cc │ │ └── transport_socket_match.h │ ├── per_file_coverage.sh │ ├── proto/ │ │ ├── BUILD │ │ ├── bookstore.proto │ │ ├── deprecated.proto │ │ ├── helloworld.proto │ │ └── sensitive.proto │ ├── run_envoy_bazel_coverage.sh │ ├── server/ │ │ ├── BUILD │ │ ├── admin/ │ │ │ ├── BUILD │ │ │ ├── admin_filter_test.cc │ │ │ ├── admin_instance.cc │ │ │ ├── admin_instance.h │ │ │ ├── admin_test.cc │ │ │ ├── clusters_handler_test.cc │ │ │ ├── config_dump_handler_test.cc │ │ │ ├── config_tracker_impl_test.cc │ │ │ ├── init_dump_handler_test.cc │ │ │ ├── logs_handler_test.cc │ │ │ ├── profiling_handler_test.cc │ │ │ ├── prometheus_stats_test.cc │ │ │ ├── runtime_handler_test.cc │ │ │ ├── server_info_handler_test.cc │ │ │ └── stats_handler_test.cc │ │ ├── api_listener_test.cc │ │ ├── backtrace_test.cc │ │ ├── config_validation/ │ │ │ ├── BUILD │ │ │ ├── async_client_test.cc │ │ │ ├── 
cluster_manager_test.cc │ │ │ ├── config_fuzz_test.cc │ │ │ ├── dispatcher_test.cc │ │ │ ├── server_test.cc │ │ │ ├── test_data/ │ │ │ │ └── runtime_config.yaml │ │ │ ├── xds_corpus/ │ │ │ │ ├── clusterfuzz-testcase-minimized-xds_fuzz_test-6524356210196480 │ │ │ │ ├── clusterfuzz-testcase-xds_fuzz_test-6589246463541248 │ │ │ │ ├── example0 │ │ │ │ ├── example1 │ │ │ │ ├── example10 │ │ │ │ ├── example13 │ │ │ │ ├── example2 │ │ │ │ ├── example3 │ │ │ │ ├── example4 │ │ │ │ ├── example5 │ │ │ │ ├── example6 │ │ │ │ ├── example7 │ │ │ │ ├── example8 │ │ │ │ └── example9 │ │ │ ├── xds_fuzz.cc │ │ │ ├── xds_fuzz.h │ │ │ ├── xds_fuzz.proto │ │ │ ├── xds_fuzz_test.cc │ │ │ ├── xds_verifier.cc │ │ │ ├── xds_verifier.h │ │ │ └── xds_verifier_test.cc │ │ ├── configuration_impl_test.cc │ │ ├── connection_handler_test.cc │ │ ├── drain_manager_impl_test.cc │ │ ├── filter_chain_benchmark_test.cc │ │ ├── filter_chain_manager_impl_test.cc │ │ ├── guarddog_impl_test.cc │ │ ├── hot_restart_impl_test.cc │ │ ├── hot_restarting_parent_test.cc │ │ ├── lds_api_test.cc │ │ ├── listener_manager_impl_quic_only_test.cc │ │ ├── listener_manager_impl_test.cc │ │ ├── listener_manager_impl_test.h │ │ ├── options_impl_test.cc │ │ ├── overload_manager_impl_test.cc │ │ ├── server_corpus/ │ │ │ ├── api_boost_crash │ │ │ ├── clusterfuzz-testcase-config_fuzz_test-4788023076847616 │ │ │ ├── clusterfuzz-testcase-config_fuzz_test-5067970991095808 │ │ │ ├── clusterfuzz-testcase-config_fuzz_test-5664687524413440 │ │ │ ├── clusterfuzz-testcase-config_fuzz_test-5697041979146240 │ │ │ ├── clusterfuzz-testcase-config_fuzz_test-5729922022113280 │ │ │ ├── clusterfuzz-testcase-config_fuzz_test-5747944989392896 │ │ │ ├── clusterfuzz-testcase-config_fuzz_test-6287096397430784 │ │ │ ├── clusterfuzz-testcase-minimized-config_fuzz_test-5118008002871296 │ │ │ ├── clusterfuzz-testcase-minimized-config_fuzz_test-5118008002871297 │ │ │ ├── clusterfuzz-testcase-minimized-config_fuzz_test-5186283155750912 │ │ │ ├── 
clusterfuzz-testcase-minimized-config_fuzz_test-5666128418832384 │ │ │ ├── clusterfuzz-testcase-minimized-config_fuzz_test-5674078337236992 │ │ │ ├── clusterfuzz-testcase-minimized-config_fuzz_test-5702999713513472 │ │ │ ├── clusterfuzz-testcase-minimized-config_fuzz_test-5754548048625664 │ │ │ ├── clusterfuzz-testcase-minimized-config_fuzz_test-5762646786179072 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5083428128030720 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5084029869883392 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5632902623657984 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5633109961998336 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5665272556158976 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5665941383282688 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5686444035670016 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5690948441341952 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5705296232579072 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5714049408172032 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5724853840117760 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5730612661452800 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5733243234811904 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5742573780467712 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5751467204411392 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-5761881319407616 │ │ │ ├── clusterfuzz-testcase-minimized-server_fuzz_test-6246954531291136 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-4832853025095680 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-4890981380915200 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5085107063881728 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5366294281977856 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5647989147697152 │ │ │ ├── 
clusterfuzz-testcase-server_fuzz_test-5691106634760192 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5696568846450688 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5697356077989888 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5704964522377216 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5705154446753792 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5734693923717120 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5754606195310592 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5755877701713920 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5763613693837312 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5809171076218880 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-5988544525893632 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-6036175623028736 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-6236930453798912 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-6280208148594688 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-6288786894880768 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-6313779791921152 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-6419204524736512 │ │ │ ├── clusterfuzz-testcase-server_fuzz_test-6610050496856064 │ │ │ ├── crash-38abba5264d01217f4f027f02dc403eae6eda8bb │ │ │ ├── crash-ac725507195d840cdb90bed3079b877e6e9419e3 │ │ │ ├── crash-cbd98584afd43791dc2143260c4438f4d2db5e87 │ │ │ ├── crash-d60f68abcafaae8e7b135ca5144b062d969e5575 │ │ │ ├── crash-da39a3ee5e6b4b0d3255bfef95601890afd80709 │ │ │ ├── crash-db2ee19f50162f2079dc0c5ba24fd0e3dcb8b9bc │ │ │ ├── crash-e0339370f24027b5c73b5355e74c0b68c8b33314 │ │ │ ├── google_com_proxy.v2.pb_text │ │ │ ├── not_implemented_envoy_internal │ │ │ ├── not_reached │ │ │ └── valid │ │ ├── server_fuzz_test.cc │ │ ├── server_test.cc │ │ ├── ssl_context_manager_test.cc │ │ ├── test_data/ │ │ │ ├── runtime/ │ │ │ │ ├── override/ │ │ │ │ │ └── some_service/ │ │ │ │ │ └── fizz │ │ │ │ └── primary/ │ │ │ │ └── fizz │ │ │ ├── server/ │ │ │ │ ├── bad_sds_config_source.yaml │ │ │ │ ├── 
bootstrap_extensions.yaml │ │ │ │ ├── callbacks_stats_sink_bootstrap.yaml │ │ │ │ ├── cluster_dupe_bootstrap.yaml │ │ │ │ ├── cluster_health_check_bootstrap.yaml │ │ │ │ ├── empty_bootstrap.yaml │ │ │ │ ├── invalid_bootstrap.yaml │ │ │ │ ├── invalid_layered_runtime_duplicate_name.yaml │ │ │ │ ├── invalid_layered_runtime_missing_name.yaml │ │ │ │ ├── invalid_layered_runtime_no_layer_specifier.yaml │ │ │ │ ├── invalid_legacy_runtime_bootstrap.yaml │ │ │ │ ├── invalid_runtime_bootstrap.yaml │ │ │ │ ├── node_bootstrap.pb_text │ │ │ │ ├── node_bootstrap.yaml │ │ │ │ ├── node_bootstrap_no_admin_port.yaml │ │ │ │ ├── node_bootstrap_with_admin_socket_options.yaml │ │ │ │ ├── node_bootstrap_without_access_log.yaml │ │ │ │ ├── proxy_version_bootstrap.yaml │ │ │ │ ├── runtime_bootstrap.yaml │ │ │ │ ├── runtime_bootstrap_ads_eds.yaml │ │ │ │ ├── runtime_bootstrap_eds.yaml │ │ │ │ ├── stats_sink_bootstrap.yaml │ │ │ │ ├── unparseable_bootstrap.yaml │ │ │ │ ├── valid_v2_but_invalid_v3_bootstrap.pb_text │ │ │ │ ├── valid_v2_but_invalid_v3_bootstrap.yaml │ │ │ │ ├── valid_v3_but_invalid_v2_bootstrap.pb_text │ │ │ │ ├── valid_v3_but_invalid_v2_bootstrap.yaml │ │ │ │ ├── watchdogs_bootstrap_with_deprecated_field.yaml │ │ │ │ └── zipkin_tracing_deprecated_config.yaml │ │ │ └── static_validation/ │ │ │ ├── bootstrap_unknown_field.yaml │ │ │ ├── cluster_unknown_field.yaml │ │ │ ├── listener_unknown_field.yaml │ │ │ └── network_filter_unknown_field.yaml │ │ ├── utility.h │ │ └── worker_impl_test.cc │ ├── test_common/ │ │ ├── BUILD │ │ ├── contention.cc │ │ ├── contention.h │ │ ├── environment.cc │ │ ├── environment.h │ │ ├── file_system_for_test.cc │ │ ├── file_system_for_test.h │ │ ├── global.cc │ │ ├── global.h │ │ ├── global_test.cc │ │ ├── logging.cc │ │ ├── logging.h │ │ ├── network_utility.cc │ │ ├── network_utility.h │ │ ├── network_utility_test.cc │ │ ├── only_one_thread.cc │ │ ├── only_one_thread.h │ │ ├── printers.cc │ │ ├── printers.h │ │ ├── registry.h │ │ ├── resources.h │ 
│ ├── simulated_time_system.cc │ │ ├── simulated_time_system.h │ │ ├── simulated_time_system_test.cc │ │ ├── status_utility.h │ │ ├── test_runtime.h │ │ ├── test_time.cc │ │ ├── test_time.h │ │ ├── test_time_system.cc │ │ ├── test_time_system.h │ │ ├── test_time_system_test.cc │ │ ├── test_version_linkstamp.cc │ │ ├── thread_factory_for_test.cc │ │ ├── thread_factory_for_test.h │ │ ├── threadsafe_singleton_injector.h │ │ ├── utility.cc │ │ ├── utility.h │ │ ├── utility_test.cc │ │ └── wasm_base.h │ ├── test_listener.cc │ ├── test_listener.h │ ├── test_runner.cc │ ├── test_runner.h │ └── tools/ │ ├── config_load_check/ │ │ ├── BUILD │ │ └── config_load_check.cc │ ├── router_check/ │ │ ├── BUILD │ │ ├── coverage.cc │ │ ├── coverage.h │ │ ├── router.cc │ │ ├── router.h │ │ ├── router_check.cc │ │ ├── test/ │ │ │ ├── BUILD │ │ │ ├── config/ │ │ │ │ ├── ClusterHeader.golden.proto.json │ │ │ │ ├── ClusterHeader.yaml │ │ │ │ ├── ComprehensiveRoutes.golden.proto.json │ │ │ │ ├── ComprehensiveRoutes.yaml │ │ │ │ ├── ContentType.golden.proto.json │ │ │ │ ├── ContentType.yaml │ │ │ │ ├── DirectResponse.golden.proto.json │ │ │ │ ├── DirectResponse.yaml │ │ │ │ ├── HeaderMatchedRouting.golden.proto.json │ │ │ │ ├── HeaderMatchedRouting.yaml │ │ │ │ ├── Redirect.golden.proto.json │ │ │ │ ├── Redirect.yaml │ │ │ │ ├── Redirect2.golden.proto.json │ │ │ │ ├── Redirect2.yaml │ │ │ │ ├── Redirect3.golden.proto.json │ │ │ │ ├── Redirect3.yaml │ │ │ │ ├── Runtime.golden.proto.json │ │ │ │ ├── Runtime.yaml │ │ │ │ ├── TestRoutes.golden.proto.json │ │ │ │ ├── TestRoutes.yaml │ │ │ │ ├── TestRoutesFailures.golden.proto.json │ │ │ │ ├── Weighted.golden.proto.json │ │ │ │ ├── Weighted.golden.proto.pb_text │ │ │ │ ├── Weighted.golden.proto.yaml │ │ │ │ └── Weighted.yaml │ │ │ └── route_tests.sh │ │ └── validation.proto │ ├── schema_validator/ │ │ ├── BUILD │ │ ├── schema_validator.cc │ │ ├── validator.cc │ │ └── validator.h │ ├── type_whisperer/ │ │ ├── BUILD │ │ └── api_type_db_test.cc │ 
└── wee8_compile/ │ ├── BUILD │ └── wee8_compile.cc ├── third_party/ │ └── statusor/ │ ├── BUILD │ ├── statusor.cc │ ├── statusor.h │ ├── statusor_internals.h │ └── statusor_test.cc └── tools/ ├── BUILD ├── api/ │ ├── generate_go_protobuf.py │ └── validate_structure.py ├── api_boost/ │ ├── README.md │ ├── api_boost.py │ ├── api_boost_test.py │ └── testdata/ │ ├── BUILD │ ├── decl_ref_expr.cc │ ├── decl_ref_expr.cc.gold │ ├── deprecate.cc │ ├── deprecate.cc.gold │ ├── elaborated_type.cc │ ├── elaborated_type.cc.gold │ ├── no_boost_file.cc │ ├── no_boost_file.cc.gold │ ├── rename.cc │ ├── rename.cc.gold │ ├── using_decl.cc │ ├── using_decl.cc.gold │ ├── validate.cc │ └── validate.cc.gold ├── api_proto_plugin/ │ ├── BUILD │ ├── __init__.py │ ├── annotations.py │ ├── plugin.bzl │ ├── plugin.py │ ├── traverse.py │ ├── type_context.py │ ├── utils.py │ └── visitor.py ├── bazel-test-docker.sh ├── bootstrap2pb.cc ├── build_profile.py ├── check_repositories.sh ├── clang_tools/ │ ├── README.md │ ├── api_booster/ │ │ ├── BUILD │ │ ├── main.cc │ │ ├── proto_cxx_utils.cc │ │ ├── proto_cxx_utils.h │ │ └── proto_cxx_utils_test.cc │ ├── support/ │ │ ├── BUILD │ │ ├── BUILD.prebuilt │ │ └── clang_tools.bzl │ └── syntax_only/ │ ├── BUILD │ └── main.cc ├── code_format/ │ ├── .style.yapf │ ├── check_format.py │ ├── check_format_test.sh │ ├── check_format_test_helper.py │ ├── check_format_test_helper.sh │ ├── check_shellcheck_format.sh │ ├── common.py │ ├── envoy_build_fixer.py │ ├── format_python_tools.py │ ├── format_python_tools.sh │ ├── header_order.py │ ├── paths.py │ └── requirements.txt ├── config_validation/ │ ├── BUILD │ ├── requirements.txt │ └── validate_fragment.py ├── debugging/ │ ├── run-valgrind.sh │ └── valgrind-suppressions.txt ├── dependency/ │ ├── validate.py │ └── validate_test.py ├── deprecate_features/ │ ├── deprecate_features.py │ ├── deprecate_features.sh │ └── requirements.txt ├── deprecate_version/ │ ├── deprecate_version.py │ ├── deprecate_version.sh │ └── 
requirements.txt ├── docker_wrapper.sh ├── envoy-rotate-files.el ├── envoy_collect/ │ ├── README.md │ └── envoy_collect.py ├── envoy_headersplit/ │ ├── BUILD │ ├── README.md │ ├── code_corpus/ │ │ ├── class_defn.h │ │ ├── class_defn_without_namespace.h │ │ ├── class_impl.cc │ │ ├── fail_mocks.cc │ │ ├── fail_mocks.h │ │ ├── fake_build │ │ ├── fake_source_code.cc │ │ └── hello.h │ ├── headersplit.py │ ├── headersplit_test.py │ ├── replace_includes.py │ ├── replace_includes_test.py │ └── requirements.txt ├── find_related_envoy_files.py ├── gen_compilation_database.py ├── git/ │ ├── last_github_commit.sh │ └── modified_since_last_github_commit.sh ├── github/ │ ├── requirements.txt │ ├── sync_assignable.py │ └── sync_assignable.sh ├── path_fix.sh ├── print_dependencies.py ├── proto_format/ │ ├── active_protos_gen.py │ ├── proto_format.sh │ └── proto_sync.py ├── protodoc/ │ ├── BUILD │ ├── generate_empty.py │ ├── manifest.proto │ ├── protodoc.bzl │ ├── protodoc.py │ └── requirements.txt ├── protoxform/ │ ├── BUILD │ ├── merge_active_shadow.py │ ├── merge_active_shadow_test.py │ ├── migrate.py │ ├── options.py │ ├── protoprint.py │ ├── protoxform.bzl │ ├── protoxform.py │ ├── protoxform_test.sh │ ├── protoxform_test_helper.py │ └── utils.py ├── run_command.py ├── shell_utils.sh ├── socket_passing.py ├── spelling/ │ ├── check_spelling.sh │ ├── check_spelling_pedantic.py │ ├── check_spelling_pedantic_test.py │ ├── check_spelling_pedantic_test.sh │ ├── spelling_allowlist_words.txt │ ├── spelling_dictionary.txt │ └── spelling_skip_files.txt ├── stack_decode.py ├── testdata/ │ ├── check_format/ │ │ ├── add_envoy_package.BUILD │ │ ├── add_envoy_package.BUILD.gold │ │ ├── angle_bracket_include.cc │ │ ├── angle_bracket_include.cc.gold │ │ ├── api/ │ │ │ └── missing_package.proto │ │ ├── attribute_packed.cc │ │ ├── bad_envoy_build_sys_ref.BUILD │ │ ├── bad_envoy_build_sys_ref.BUILD.gold │ │ ├── bazel_tools.BUILD │ │ ├── canonical_api_deps.BUILD │ │ ├── 
canonical_api_deps.BUILD.gold │ │ ├── canonical_api_deps.cc │ │ ├── canonical_api_deps.h │ │ ├── canonical_api_deps.other.cc │ │ ├── canonical_spacing.BUILD │ │ ├── canonical_spacing.BUILD.gold │ │ ├── clang_format_double_off.cc │ │ ├── clang_format_double_on.cc │ │ ├── clang_format_off.cc │ │ ├── clang_format_on.cc │ │ ├── clang_format_on.cc.gold │ │ ├── clang_format_trailing_off.cc │ │ ├── commented_throw.cc │ │ ├── condition_variable.cc │ │ ├── condition_variable_any.cc │ │ ├── condvar_wait_for.cc │ │ ├── counter_from_string.cc │ │ ├── cpp_std.cc │ │ ├── cpp_std.cc.gold │ │ ├── designated_initializers.cc │ │ ├── duration_value.cc │ │ ├── duration_value_zero.cc │ │ ├── elvis_operator.cc │ │ ├── extra_enthusiastic_spaces.cc │ │ ├── extra_enthusiastic_spaces.cc.gold │ │ ├── gauge_from_string.cc │ │ ├── grpc_init.cc │ │ ├── grpc_shutdown.cc │ │ ├── header_order.cc │ │ ├── header_order.cc.gold │ │ ├── histogram_from_string.cc │ │ ├── license.BUILD │ │ ├── license.BUILD.gold │ │ ├── long_line.cc │ │ ├── long_line.cc.gold │ │ ├── mutex.cc │ │ ├── no_namespace_envoy.cc │ │ ├── over_enthusiastic_spaces.cc │ │ ├── over_enthusiastic_spaces.cc.gold │ │ ├── pgv_string.proto │ │ ├── proto.BUILD │ │ ├── proto.BUILD.gold │ │ ├── proto_deps.cc │ │ ├── proto_deps.cc.gold │ │ ├── proto_enum_mangling.cc │ │ ├── proto_format.proto │ │ ├── proto_format.proto.gold │ │ ├── proto_style.cc │ │ ├── proto_style.cc.gold │ │ ├── real_time_source.cc │ │ ├── real_time_source_override.cc │ │ ├── real_time_system.cc │ │ ├── regex.cc │ │ ├── remove_unused_loads.BUILD │ │ ├── remove_unused_loads.BUILD.gold │ │ ├── serialize_as_string.cc │ │ ├── shared_mutex.cc │ │ ├── skip_envoy_package.BUILD │ │ ├── skip_envoy_package.BUILD.gold │ │ ├── sleep.cc │ │ ├── std_any.cc │ │ ├── std_atomic_free_functions.cc │ │ ├── std_get_if.cc │ │ ├── std_get_time.cc │ │ ├── std_holds_alternative.cc │ │ ├── std_make_optional.cc │ │ ├── std_monostate.cc │ │ ├── std_optional.cc │ │ ├── std_string_view.cc │ │ ├── 
std_unordered_map.cc │ │ ├── std_unordered_set.cc │ │ ├── std_variant.cc │ │ ├── std_visit.cc │ │ ├── steady_clock.cc │ │ ├── strerror.cc │ │ ├── system_clock.cc │ │ ├── test/ │ │ │ └── register_factory.cc │ │ ├── test_naming.cc │ │ ├── testing_test.cc │ │ ├── throw.cc │ │ ├── time_system_wait_for.cc │ │ ├── unpack_to.cc │ │ ├── update_license.BUILD │ │ ├── update_license.BUILD.gold │ │ └── version_history/ │ │ └── current.rst │ ├── protoxform/ │ │ ├── BUILD │ │ ├── envoy/ │ │ │ ├── active_non_terminal/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ ├── active_non_terminal.proto │ │ │ │ ├── active_non_terminal.proto.active_or_frozen.gold │ │ │ │ ├── active_non_terminal.proto.next_major_version_candidate.envoy_internal.gold │ │ │ │ └── active_non_terminal.proto.next_major_version_candidate.gold │ │ │ ├── active_terminal/ │ │ │ │ └── v2/ │ │ │ │ ├── BUILD │ │ │ │ ├── active_terminal.proto │ │ │ │ ├── active_terminal.proto.active_or_frozen.gold │ │ │ │ ├── active_terminal.proto.next_major_version_candidate.envoy_internal.gold │ │ │ │ └── active_terminal.proto.next_major_version_candidate.gold │ │ │ ├── frozen/ │ │ │ │ ├── v2/ │ │ │ │ │ ├── BUILD │ │ │ │ │ ├── frozen.proto │ │ │ │ │ ├── frozen.proto.active_or_frozen.gold │ │ │ │ │ ├── frozen.proto.next_major_version_candidate.envoy_internal.gold │ │ │ │ │ └── frozen.proto.next_major_version_candidate.gold │ │ │ │ └── v3/ │ │ │ │ ├── BUILD │ │ │ │ ├── frozen.proto │ │ │ │ ├── frozen.proto.active_or_frozen.gold │ │ │ │ ├── frozen.proto.next_major_version_candidate.envoy_internal.gold │ │ │ │ └── frozen.proto.next_major_version_candidate.gold │ │ │ └── v2/ │ │ │ ├── BUILD │ │ │ ├── discovery_service.proto │ │ │ ├── discovery_service.proto.active_or_frozen.gold │ │ │ ├── discovery_service.proto.next_major_version_candidate.envoy_internal.gold │ │ │ ├── discovery_service.proto.next_major_version_candidate.gold │ │ │ ├── fully_qualified_names.proto │ │ │ ├── fully_qualified_names.proto.active_or_frozen.gold │ │ │ ├── 
fully_qualified_names.proto.next_major_version_candidate.envoy_internal.gold │ │ │ ├── fully_qualified_names.proto.next_major_version_candidate.gold │ │ │ ├── oneof.proto │ │ │ ├── oneof.proto.active_or_frozen.gold │ │ │ ├── oneof.proto.next_major_version_candidate.envoy_internal.gold │ │ │ ├── oneof.proto.next_major_version_candidate.gold │ │ │ ├── package_move.proto │ │ │ ├── package_move.proto.active_or_frozen.gold │ │ │ ├── package_move.proto.next_major_version_candidate.envoy_internal.gold │ │ │ ├── package_move.proto.next_major_version_candidate.gold │ │ │ ├── sample.proto │ │ │ ├── sample.proto.active_or_frozen.gold │ │ │ ├── sample.proto.next_major_version_candidate.envoy_internal.gold │ │ │ └── sample.proto.next_major_version_candidate.gold │ │ └── external/ │ │ ├── BUILD │ │ ├── package_type.proto │ │ └── root_type.proto │ └── spelling/ │ ├── exclusions │ ├── on_off │ ├── rst_code_block │ ├── skip_blocks │ ├── skip_file │ ├── typos │ ├── valid │ └── word_splitting ├── type_whisperer/ │ ├── BUILD │ ├── api_build_file.bzl │ ├── api_type_db.cc │ ├── api_type_db.h │ ├── api_type_db.proto │ ├── file_descriptor_set_text.bzl │ ├── file_descriptor_set_text_gen.py │ ├── proto_build_targets_gen.py │ ├── proto_cc_source.bzl │ ├── proto_cc_source_gen.py │ ├── type_database.bzl │ ├── type_whisperer.bzl │ ├── type_whisperer.py │ ├── typedb_gen.py │ └── types.proto └── vscode/ ├── README.md ├── generate_debug_config.py └── refresh_compdb.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .azure-pipelines/bazel.yml ================================================ parameters: - name: ciTarget displayName: "CI target" type: string default: bazel.release - name: artifactSuffix displayName: "Suffix of artifact" type: string default: "" - name: rbe displayName: "Enable RBE" type: boolean default: true - name: managedAgent type: boolean default: true - name: 
bazelBuildExtraOptions type: string default: "--flaky_test_attempts=2" steps: - task: Cache@2 inputs: key: '"${{ parameters.ciTarget }}" | ./WORKSPACE | **/*.bzl' path: $(Build.StagingDirectory)/repository_cache continueOnError: true - bash: .azure-pipelines/cleanup.sh displayName: "Removing tools from agent" condition: ${{ parameters.managedAgent }} - bash: | echo "disk space at beginning of build:" df -h displayName: "Check disk space at beginning" - bash: | sudo mkdir -p /etc/docker echo '{ "ipv6": true, "fixed-cidr-v6": "2001:db8:1::/64" }' | sudo tee /etc/docker/daemon.json sudo service docker restart displayName: "Enable IPv6" condition: ${{ parameters.managedAgent }} - script: ci/run_envoy_docker.sh 'ci/do_ci.sh ${{ parameters.ciTarget }}' workingDirectory: $(Build.SourcesDirectory) env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) SLACK_TOKEN: $(SLACK_TOKEN) REPO_URI: $(Build.Repository.Uri) BUILD_URI: $(Build.BuildUri) ${{ if parameters.rbe }}: ENVOY_RBE: "1" BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --jobs=$(RbeJobs) ${{ parameters.bazelBuildExtraOptions }}" BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) ${{ if eq(parameters.rbe, false) }}: BAZEL_BUILD_EXTRA_OPTIONS: "${{ parameters.bazelBuildExtraOptions }}" BAZEL_REMOTE_CACHE: $(LocalBuildCache) displayName: "Run CI script" - bash: | echo "disk space at end of build:" df -h displayName: "Check disk space at end" condition: always() - task: PublishTestResults@2 inputs: testResultsFiles: "**/bazel-out/**/testlogs/**/test.xml" testRunTitle: "${{ parameters.ciTarget }}" searchFolder: $(Build.StagingDirectory)/tmp condition: always() - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/envoy" artifactName: ${{ parameters.ciTarget }}${{ parameters.artifactSuffix }} condition: always() ================================================ FILE: 
.azure-pipelines/cleanup.sh ================================================ #!/bin/bash set -e # Temporary script to remove tools from Azure pipelines agent to create more disk space room. sudo apt-get update -y sudo apt-get purge -y --no-upgrade 'ghc-*' 'zulu-*-azure-jdk' 'libllvm*' 'mysql-*' 'dotnet-*' 'libgl1' \ 'adoptopenjdk-*' 'azure-cli' 'google-chrome-stable' 'firefox' 'hhvm' dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -rn ================================================ FILE: .azure-pipelines/pipelines.yml ================================================ trigger: branches: include: - "master" - "release/v*" tags: include: - "v*" # PR build config is manually overridden in Azure pipelines UI with different secrets pr: none jobs: - job: format dependsOn: [] # this removes the implicit dependency on previous stage and causes this to run in parallel. pool: vmImage: "ubuntu-18.04" steps: - task: Cache@2 inputs: key: "format | ./WORKSPACE | **/*.bzl" path: $(Build.StagingDirectory)/repository_cache continueOnError: true - script: ci/run_envoy_docker.sh 'ci/check_and_fix_format.sh' workingDirectory: $(Build.SourcesDirectory) env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) displayName: "Run check format scripts" - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/fix_format.diff" artifactName: format condition: failed() - job: release displayName: "Linux-x64 release" dependsOn: ["format"] # For master builds, continue even if format fails condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) timeoutInMinutes: 360 pool: vmImage: "ubuntu-18.04" steps: - template: bazel.yml parameters: ciTarget: bazel.release - job: release_arm64 displayName: "Linux-arm64 release" dependsOn: ["format"] # For master 
builds, continue even if format fails condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) timeoutInMinutes: 360 pool: "arm-large" steps: - template: bazel.yml parameters: managedAgent: false ciTarget: bazel.release rbe: false artifactSuffix: ".arm64" bazelBuildExtraOptions: "--sandbox_base=/tmp/sandbox_base" - job: bazel displayName: "Linux-x64" dependsOn: ["release"] # For master builds, continue even if format fails condition: and(not(canceled()), or(succeeded(), ne(variables['Build.Reason'], 'PullRequest'))) strategy: maxParallel: 3 matrix: gcc: CI_TARGET: "bazel.gcc" clang_tidy: CI_TARGET: "bazel.clang_tidy" asan: CI_TARGET: "bazel.asan" tsan: CI_TARGET: "bazel.tsan" compile_time_options: CI_TARGET: "bazel.compile_time_options" timeoutInMinutes: 360 pool: vmImage: "ubuntu-18.04" steps: - template: bazel.yml parameters: ciTarget: $(CI_TARGET) - job: coverage displayName: "Linux-x64" dependsOn: ["release"] timeoutInMinutes: 360 pool: "x64-large" strategy: maxParallel: 2 matrix: coverage: CI_TARGET: "coverage" fuzz_coverage: CI_TARGET: "fuzz_coverage" steps: - template: bazel.yml parameters: managedAgent: false ciTarget: bazel.$(CI_TARGET) rbe: false # /tmp/sandbox_base is a tmpfs in CI environment to optimize large I/O for coverage traces bazelBuildExtraOptions: "--define=no_debug_info=1 --linkopt=-Wl,-s --test_env=ENVOY_IP_TEST_VERSIONS=v4only --sandbox_base=/tmp/sandbox_base" - script: ci/run_envoy_docker.sh 'ci/upload_gcs_artifact.sh /source/generated/$(CI_TARGET) $(CI_TARGET)' displayName: "Upload $(CI_TARGET) Report to GCS" env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) GCS_ARTIFACT_BUCKET: $(GcsArtifactBucket) condition: always() - job: docker displayName: "Linux multi-arch docker" dependsOn: ["release", "release_arm64"] pool: vmImage: "ubuntu-18.04" steps: - bash: .azure-pipelines/cleanup.sh displayName: "Removing tools from agent" - task: 
DownloadBuildArtifacts@0 inputs: buildType: current artifactName: "bazel.release" itemPattern: "bazel.release/envoy_binary.tar.gz" downloadType: single targetPath: $(Build.StagingDirectory) - task: DownloadBuildArtifacts@0 inputs: buildType: current artifactName: "bazel.release.arm64" itemPattern: "bazel.release.arm64/envoy_binary.tar.gz" downloadType: single targetPath: $(Build.StagingDirectory) - bash: | set -e mkdir -p linux/amd64 && tar zxf $(Build.StagingDirectory)/bazel.release/envoy_binary.tar.gz -C ./linux/amd64 mkdir -p linux/arm64 && tar zxf $(Build.StagingDirectory)/bazel.release.arm64/envoy_binary.tar.gz -C ./linux/arm64 ci/docker_ci.sh workingDirectory: $(Build.SourcesDirectory) env: AZP_BRANCH: $(Build.SourceBranch) AZP_SHA1: $(Build.SourceVersion) DOCKERHUB_USERNAME: $(DockerUsername) DOCKERHUB_PASSWORD: $(DockerPassword) - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/build_images" artifactName: docker condition: always() - job: examples dependsOn: ["docker"] displayName: "Verify examples run as documented" pool: vmImage: "ubuntu-18.04" steps: - task: DownloadBuildArtifacts@0 inputs: buildType: current artifactName: "docker" itemPattern: "docker/envoy-docker-images.tar.xz" downloadType: single targetPath: $(Build.StagingDirectory) - bash: ./ci/do_ci.sh verify_examples env: ENVOY_DOCKER_BUILD_DIR: $(Build.StagingDirectory) NO_BUILD_SETUP: 1 - job: macOS dependsOn: ["format"] timeoutInMinutes: 360 pool: vmImage: "macos-latest" steps: - script: ./ci/mac_ci_setup.sh displayName: "Install dependencies" - script: ./ci/mac_ci_steps.sh displayName: "Run Mac CI" env: BAZEL_BUILD_EXTRA_OPTIONS: "--remote_download_toplevel --flaky_test_attempts=2" BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - task: PublishTestResults@2 inputs: testResultsFiles: "**/bazel-testlogs/**/test.xml" testRunTitle: 
"macOS" condition: always() - script: ./ci/flaky_test/run_process_xml_mac.sh displayName: "Process Test Results" env: TEST_TMPDIR: $(Build.SourcesDirectory) SLACK_TOKEN: $(SLACK_TOKEN) CI_TARGET: "MacOS" REPO_URI: $(Build.Repository.Uri) BUILD_URI: $(Build.BuildUri) - job: Windows dependsOn: ["format"] timeoutInMinutes: 360 pool: vmImage: "windows-latest" steps: - bash: ci/run_envoy_docker.sh ci/windows_ci_steps.sh displayName: "Run Windows CI" env: ENVOY_DOCKER_BUILD_DIR: "$(Build.StagingDirectory)" ENVOY_RBE: "true" BAZEL_BUILD_EXTRA_OPTIONS: "--config=remote-ci --config=remote-msvc-cl --jobs=$(RbeJobs)" BAZEL_REMOTE_CACHE: grpcs://remotebuildexecution.googleapis.com BAZEL_REMOTE_INSTANCE: projects/envoy-ci/instances/default_instance GCP_SERVICE_ACCOUNT_KEY: $(GcpServiceAccountKey) - task: PublishBuildArtifacts@1 inputs: pathtoPublish: "$(Build.StagingDirectory)/envoy" artifactName: windows.release condition: always() ================================================ FILE: .bazelci/presubmit.yml ================================================ --- tasks: rbe: name: "RBE" platform: ubuntu1804 test_targets: - "//test/common/common/..." - "//test/integration/..." - "//test/exe/..." test_flags: - "--config=remote-clang-libc++" - "--config=remote-ci" - "--jobs=75" coverage: name: "Coverage" platform: ubuntu1804 shell_commands: - "bazel/setup_clang.sh /usr/lib/llvm-10" test_targets: - "//test/common/common/..." - "//test/integration/..." - "//test/exe/..." test_flags: - "--config=coverage" - "--config=clang" ================================================ FILE: .bazelignore ================================================ api examples/grpc-bridge/script tools/clang_tools ================================================ FILE: .bazelrc ================================================ # Envoy specific Bazel build/test options. 
# Bazel doesn't need more than 200MB of memory for local build based on memory profiling: # https://docs.bazel.build/versions/master/skylark/performance.html#memory-profiling # The default JVM max heapsize is 1/4 of physical memory up to 32GB which could be large # enough to consume all memory constrained by cgroup in a large host, which is the case in CircleCI. # Limiting JVM heapsize here to let it do GC more when approaching the limit to # leave room for compiler/linker. # The number 2G is chosen heuristically to both work in CircleCI and be large enough for RBE. # Startup options cannot be selected via config. startup --host_jvm_args=-Xmx2g build --workspace_status_command="bash bazel/get_workspace_status" build --experimental_strict_action_env=true build --host_force_python=PY3 build --action_env=BAZEL_LINKLIBS=-l%:libstdc++.a build --action_env=BAZEL_LINKOPTS=-lm build --host_javabase=@bazel_tools//tools/jdk:remote_jdk11 build --javabase=@bazel_tools//tools/jdk:remote_jdk11 build --enable_platform_specific_config # Enable position independent code, this option is not supported on Windows and is on by default on macOS. build:linux --copt=-fPIC build:linux --cxxopt=-std=c++17 build:linux --conlyopt=-fexceptions build:linux --fission=dbg,opt build:linux --features=per_object_debug_info # We already have absl in the build, define absl=1 to tell googletest to use absl for backtrace. build --define absl=1 # Pass PATH, CC, CXX and LLVM_CONFIG variables from the environment. 
build --action_env=CC build --action_env=CXX build --action_env=LLVM_CONFIG build --action_env=PATH # Common flags for sanitizers build:sanitizer --define tcmalloc=disabled build:sanitizer --linkopt -ldl build:sanitizer --build_tag_filters=-no_san build:sanitizer --test_tag_filters=-no_san # Common flags for Clang build:clang --action_env=BAZEL_COMPILER=clang build:clang --linkopt=-fuse-ld=lld # Basic ASAN/UBSAN that works for gcc build:asan --action_env=ENVOY_ASAN=1 build:asan --config=sanitizer # ASAN install its signal handler, disable ours so the stacktrace will be printed by ASAN build:asan --define signal_trace=disabled build:asan --define ENVOY_CONFIG_ASAN=1 build:asan --copt -fsanitize=address,undefined build:asan --linkopt -fsanitize=address,undefined # vptr and function sanitizer are enabled in clang-asan if it is set up via bazel/setup_clang.sh. build:asan --copt -fno-sanitize=vptr,function build:asan --linkopt -fno-sanitize=vptr,function build:asan --copt -DADDRESS_SANITIZER=1 build:asan --copt -D__SANITIZE_ADDRESS__ build:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true:detect_odr_violation=1 build:asan --test_env=UBSAN_OPTIONS=halt_on_error=true:print_stacktrace=1 build:asan --test_env=ASAN_SYMBOLIZER_PATH # Clang ASAN/UBSAN build:clang-asan --config=asan build:clang-asan --linkopt -fuse-ld=lld # macOS ASAN/UBSAN build:macos --cxxopt=-std=c++17 build:macos-asan --config=asan # Workaround, see https://github.com/bazelbuild/bazel/issues/6932 build:macos-asan --copt -Wno-macro-redefined build:macos-asan --copt -D_FORTIFY_SOURCE=0 # Workaround, see https://github.com/bazelbuild/bazel/issues/4341 build:macos-asan --copt -DGRPC_BAZEL_BUILD # Dynamic link cause issues like: `dyld: malformed mach-o: load commands size (59272) > 32768` build:macos-asan --dynamic_mode=off # Clang TSAN build:clang-tsan --action_env=ENVOY_TSAN=1 build:clang-tsan --config=sanitizer build:clang-tsan --define 
ENVOY_CONFIG_TSAN=1 build:clang-tsan --copt -fsanitize=thread build:clang-tsan --linkopt -fsanitize=thread build:clang-tsan --linkopt -fuse-ld=lld build:clang-tsan --build_tag_filters=-no_san,-no_tsan build:clang-tsan --test_tag_filters=-no_san,-no_tsan # Needed due to https://github.com/libevent/libevent/issues/777 build:clang-tsan --copt -DEVENT__DISABLE_DEBUG_MODE # https://github.com/abseil/abseil-cpp/issues/760 # https://github.com/google/sanitizers/issues/953 build:clang-tsan --test_env="TSAN_OPTIONS=report_atomic_races=0" # Clang MSAN - this is the base config for remote-msan and docker-msan. To run this config without # our build image, follow https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo # with libc++ instruction and provide corresponding `--copt` and `--linkopt` as well. build:clang-msan --action_env=ENVOY_MSAN=1 build:clang-msan --config=sanitizer build:clang-msan --define ENVOY_CONFIG_MSAN=1 build:clang-msan --copt -fsanitize=memory build:clang-msan --linkopt -fsanitize=memory build:clang-msan --copt -fsanitize-memory-track-origins=2 # MSAN needs -O1 to get reasonable performance. build:clang-msan --copt -O1 # Clang with libc++ build:libc++ --config=clang build:libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:libc++ --action_env=LDFLAGS=-stdlib=libc++ build:libc++ --action_env=BAZEL_CXXOPTS=-stdlib=libc++ build:libc++ --action_env=BAZEL_LINKLIBS=-l%:libc++.a:-l%:libc++abi.a build:libc++ --action_env=BAZEL_LINKOPTS=-lm:-pthread build:libc++ --define force_libcpp=enabled # Optimize build for binary size reduction. 
build:sizeopt -c opt --copt -Os # Test options build --test_env=HEAPCHECK=normal --test_env=PPROF_PATH # Coverage options coverage --config=coverage coverage --build_tests_only build:coverage --action_env=BAZEL_USE_LLVM_NATIVE_COVERAGE=1 build:coverage --action_env=GCOV=llvm-profdata build:coverage --copt=-DNDEBUG # 1.5x original timeout + 300s for trace merger in all categories build:coverage --test_timeout=390,750,1500,5700 build:coverage --define=dynamic_link_tests=true build:coverage --define=ENVOY_CONFIG_COVERAGE=1 build:coverage --cxxopt="-DENVOY_CONFIG_COVERAGE=1" build:coverage --coverage_support=@envoy//bazel/coverage:coverage_support build:coverage --test_env=CC_CODE_COVERAGE_SCRIPT=external/envoy/bazel/coverage/collect_cc_coverage.sh build:coverage --test_env=HEAPCHECK= build:coverage --combined_report=lcov build:coverage --strategy=TestRunner=sandboxed,local build:coverage --strategy=CoverageReport=sandboxed,local build:coverage --experimental_use_llvm_covmap build:coverage --collect_code_coverage build:coverage --test_tag_filters=-nocoverage build:coverage --instrumentation_filter="//source(?!/common/chromium_url|/extensions/quic_listeners/quiche/platform)[/:],//include[/:]" coverage:test-coverage --test_arg="-l trace" coverage:fuzz-coverage --config=plain-fuzzer coverage:fuzz-coverage --run_under=@envoy//bazel/coverage:fuzz_coverage_wrapper.sh # Remote execution: https://docs.bazel.build/versions/master/remote-execution.html build:rbe-toolchain --action_env=BAZEL_DO_NOT_DETECT_CPP_TOOLCHAIN=1 build:rbe-toolchain-clang --config=rbe-toolchain build:rbe-toolchain-clang --platforms=@rbe_ubuntu_clang//config:platform build:rbe-toolchain-clang --host_platform=@rbe_ubuntu_clang//config:platform build:rbe-toolchain-clang --crosstool_top=@rbe_ubuntu_clang//cc:toolchain build:rbe-toolchain-clang --extra_toolchains=@rbe_ubuntu_clang//config:cc-toolchain build:rbe-toolchain-clang --action_env=CC=clang --action_env=CXX=clang++ 
--action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin build:rbe-toolchain-clang-libc++ --config=rbe-toolchain build:rbe-toolchain-clang-libc++ --platforms=@rbe_ubuntu_clang_libcxx//config:platform build:rbe-toolchain-clang-libc++ --host_platform=@rbe_ubuntu_clang_libcxx//config:platform build:rbe-toolchain-clang-libc++ --crosstool_top=@rbe_ubuntu_clang_libcxx//cc:toolchain build:rbe-toolchain-clang-libc++ --extra_toolchains=@rbe_ubuntu_clang_libcxx//config:cc-toolchain build:rbe-toolchain-clang-libc++ --action_env=CC=clang --action_env=CXX=clang++ --action_env=PATH=/usr/sbin:/usr/bin:/sbin:/bin:/opt/llvm/bin build:rbe-toolchain-clang-libc++ --action_env=CXXFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --action_env=LDFLAGS=-stdlib=libc++ build:rbe-toolchain-clang-libc++ --define force_libcpp=enabled build:rbe-toolchain-msan --linkopt=-L/opt/libcxx_msan/lib build:rbe-toolchain-msan --linkopt=-Wl,-rpath,/opt/libcxx_msan/lib build:rbe-toolchain-msan --config=clang-msan build:rbe-toolchain-tsan --linkopt=-L/opt/libcxx_tsan/lib build:rbe-toolchain-tsan --linkopt=-Wl,-rpath,/opt/libcxx_tsan/lib build:rbe-toolchain-tsan --config=clang-tsan build:rbe-toolchain-gcc --config=rbe-toolchain build:rbe-toolchain-gcc --platforms=@rbe_ubuntu_gcc//config:platform build:rbe-toolchain-gcc --host_platform=@rbe_ubuntu_gcc//config:platform build:rbe-toolchain-gcc --crosstool_top=@rbe_ubuntu_gcc//cc:toolchain build:rbe-toolchain-gcc --extra_toolchains=@rbe_ubuntu_gcc//config:cc-toolchain build:rbe-toolchain-msvc-cl --host_platform=@rbe_windows_msvc_cl//config:platform build:rbe-toolchain-msvc-cl --platforms=@rbe_windows_msvc_cl//config:platform build:rbe-toolchain-msvc-cl --crosstool_top=@rbe_windows_msvc_cl//cc:toolchain build:rbe-toolchain-msvc-cl --extra_toolchains=@rbe_windows_msvc_cl//config:cc-toolchain build:rbe-toolchain-clang-cl --host_platform=@rbe_windows_clang_cl//config:platform build:rbe-toolchain-clang-cl --platforms=@rbe_windows_clang_cl//config:platform 
build:rbe-toolchain-clang-cl --crosstool_top=@rbe_windows_clang_cl//cc:toolchain build:rbe-toolchain-clang-cl --extra_toolchains=@rbe_windows_clang_cl//config:cc-toolchain build:remote --spawn_strategy=remote,sandboxed,local build:remote --strategy=Javac=remote,sandboxed,local build:remote --strategy=Closure=remote,sandboxed,local build:remote --strategy=Genrule=remote,sandboxed,local # rules_rust is not remote runnable (yet) build:remote --strategy=Rustc=sandboxed,local build:remote --remote_timeout=7200 build:remote --auth_enabled=true build:remote --remote_download_toplevel # Windows bazel does not allow sandboxed as a spawn strategy build:remote-windows --spawn_strategy=remote,local build:remote-windows --strategy=Javac=remote,local build:remote-windows --strategy=Closure=remote,local build:remote-windows --strategy=Genrule=remote,local build:remote-windows --remote_timeout=7200 build:remote-windows --auth_enabled=true build:remote-windows --remote_download_toplevel build:remote-clang --config=remote build:remote-clang --config=rbe-toolchain-clang build:remote-clang-libc++ --config=remote build:remote-clang-libc++ --config=rbe-toolchain-clang-libc++ build:remote-gcc --config=remote build:remote-gcc --config=rbe-toolchain-gcc build:remote-msan --config=remote build:remote-msan --config=rbe-toolchain-clang-libc++ build:remote-msan --config=rbe-toolchain-msan build:remote-msvc-cl --config=remote-windows build:remote-msvc-cl --config=msvc-cl build:remote-msvc-cl --config=rbe-toolchain-msvc-cl build:remote-clang-cl --config=remote-windows build:remote-clang-cl --config=clang-cl build:remote-clang-cl --config=rbe-toolchain-clang-cl # Docker sandbox # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 build:docker-sandbox --experimental_docker_image=envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a build:docker-sandbox --spawn_strategy=docker build:docker-sandbox 
--strategy=Javac=docker build:docker-sandbox --strategy=Closure=docker build:docker-sandbox --strategy=Genrule=docker build:docker-sandbox --define=EXECUTOR=remote build:docker-sandbox --experimental_docker_verbose build:docker-sandbox --experimental_enable_docker_sandbox build:docker-clang --config=docker-sandbox build:docker-clang --config=rbe-toolchain-clang build:docker-clang-libc++ --config=docker-sandbox build:docker-clang-libc++ --config=rbe-toolchain-clang-libc++ build:docker-gcc --config=docker-sandbox build:docker-gcc --config=rbe-toolchain-gcc build:docker-msan --config=docker-sandbox build:docker-msan --config=rbe-toolchain-clang-libc++ build:docker-msan --config=rbe-toolchain-msan build:docker-tsan --config=docker-sandbox build:docker-tsan --config=rbe-toolchain-clang-libc++ build:docker-tsan --config=rbe-toolchain-tsan # CI configurations build:remote-ci --remote_cache=grpcs://remotebuildexecution.googleapis.com build:remote-ci --remote_executor=grpcs://remotebuildexecution.googleapis.com # Fuzz builds # -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION is passed in in the bazel build target # rules for fuzz tests. Passing it in the CLI will cause dependencies to be build # with the macro. Causing issues in RouteMatcherTest.TestRoutes that expect prod # behavior from RE2 library. build:asan-fuzzer --config=asan build:asan-fuzzer --define=FUZZING_ENGINE=libfuzzer build:asan-fuzzer --copt=-fsanitize=fuzzer-no-link build:asan-fuzzer --copt=-fno-omit-frame-pointer # Remove UBSAN halt_on_error to avoid crashing on protobuf errors. build:asan-fuzzer --test_env=UBSAN_OPTIONS=print_stacktrace=1 # Fuzzing without ASAN. This is useful for profiling fuzzers without any ASAN artifacts. 
build:plain-fuzzer --define=FUZZING_ENGINE=libfuzzer build:plain-fuzzer --define ENVOY_CONFIG_ASAN=1 build:plain-fuzzer --copt=-fsanitize=fuzzer-no-link build:plain-fuzzer --linkopt=-fsanitize=fuzzer-no-link # Compile database generation config build:compdb --build_tag_filters=-nocompdb # Windows build quirks build:windows --action_env=TMPDIR build:windows --define signal_trace=disabled build:windows --define hot_restart=disabled build:windows --define tcmalloc=disabled build:windows --define manual_stamp=manual_stamp # Should not be required after upstream fix to bazel, # and already a no-op to linux/macos builds # see issue https://github.com/bazelbuild/rules_foreign_cc/issues/301 build:windows --copt="-DCARES_STATICLIB" build:windows --copt="-DNGHTTP2_STATICLIB" build:windows --copt="-DCURL_STATICLIB" build:windows --cxxopt="/std:c++17" # Required to work around build defects on Windows MSVC cl # Unguarded gcc pragmas in quiche are not recognized by MSVC build:msvc-cl --copt="/wd4068" # Allows 'nodiscard' function return values to be discarded build:msvc-cl --copt="/wd4834" # Allows inline functions to be undefined build:msvc-cl --copt="/wd4506" build:msvc-cl --copt="-D_SILENCE_EXPERIMENTAL_FILESYSTEM_DEPRECATION_WARNING" # Required to work around Windows clang-cl build defects # Ignore conflicting definitions of _WIN32_WINNT # Overriding __TIME__ etc is problematic (and is actually an invalid no-op) build:clang-cl --copt="-Wno-macro-redefined" build:clang-cl --copt="-Wno-builtin-macro-redefined" build:clang-cl --action_env=USE_CLANG_CL=1 # Defaults to 'auto' - Off for windows, so override to linux behavior build:windows --enable_runfiles=yes # This should become adopted by bazel as the default build:windows --features=compiler_param_file # These options attempt to force a monolithic binary including the CRT build:windows --features=fully_static_link build:windows --features=static_link_msvcrt build:windows --dynamic_mode=off try-import %workspace%/clang.bazelrc 
try-import %workspace%/user.bazelrc try-import %workspace%/local_tsan.bazelrc ================================================ FILE: .bazelversion ================================================ 3.4.1 ================================================ FILE: .circleci/config.yml ================================================ version: 2.1 executors: ubuntu-build: description: "A regular build executor based on ubuntu image" docker: # NOTE: Update this from https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8 - image: envoyproxy/envoy-build-ubuntu:b480535e8423b5fd7c102fd30c92f4785519e33a resource_class: xlarge working_directory: /source jobs: api: executor: ubuntu-build steps: - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - checkout - run: ci/do_circle_ci.sh bazel.api - add_ssh_keys: fingerprints: - "fb:f3:fe:be:1c:b2:ec:b6:25:f9:7b:a6:87:54:02:8c" - run: ci/api_mirror.sh - store_artifacts: path: /build/envoy/generated destination: / go_control_plane_mirror: executor: ubuntu-build steps: - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - checkout - run: ci/do_circle_ci.sh bazel.api - add_ssh_keys: fingerprints: - "9d:3b:fe:7c:09:3b:ce:a9:6a:de:de:41:fb:6b:52:62" - run: ci/go_mirror.sh filter_example_mirror: executor: ubuntu-build steps: - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - checkout - add_ssh_keys: fingerprints: - "f6:f9:df:90:9c:4b:5f:9c:f4:69:fd:42:94:ff:88:24" - run: ci/filter_example_mirror.sh docs: executor: ubuntu-build steps: - run: rm -rf /home/circleci/project/.git # CircleCI git caching is likely broken - checkout - run: ci/do_circle_ci.sh docs - add_ssh_keys: fingerprints: - "44:c7:a1:9e:f4:9e:a5:33:11:f1:0e:79:e1:55:c9:04" - run: docs/publish.sh - store_artifacts: path: generated/docs workflows: version: 2 all: jobs: - api - go_control_plane_mirror - filter_example_mirror - docs: filters: tags: 
only: /^v.*/ ================================================ FILE: .clang-format ================================================ --- Language: Cpp AccessModifierOffset: -2 ColumnLimit: 100 DerivePointerAlignment: false PointerAlignment: Left SortIncludes: false ... --- Language: Proto ColumnLimit: 100 SpacesInContainerLiterals: false AllowShortFunctionsOnASingleLine: false ReflowComments: false ... ================================================ FILE: .clang-tidy ================================================ Checks: '-clang-analyzer-core.NonNullParamChecker, -clang-analyzer-optin.cplusplus.UninitializedObject, abseil-duration-*, abseil-faster-strsplit-delimiter, abseil-no-namespace, abseil-redundant-strcat-calls, abseil-str-cat-append, abseil-string-find-startswith, abseil-upgrade-duration-conversions, bugprone-assert-side-effect, bugprone-unused-raii, bugprone-use-after-move, clang-analyzer-core.DivideZero, misc-unused-using-decls, modernize-deprecated-headers, modernize-loop-convert, modernize-make-shared, modernize-make-unique, modernize-return-braced-init-list, modernize-use-default-member-init, modernize-use-equals-default, modernize-use-nullptr, modernize-use-override, modernize-use-using, performance-faster-string-find, performance-for-range-copy, performance-inefficient-algorithm, performance-inefficient-vector-operation, performance-noexcept-move-constructor, performance-move-constructor-init, performance-type-promotion-in-math-fn, performance-unnecessary-copy-initialization, readability-braces-around-statements, readability-container-size-empty, readability-identifier-naming, readability-redundant-control-flow, readability-redundant-member-init, readability-redundant-smartptr-get, readability-redundant-string-cstr' WarningsAsErrors: '*' CheckOptions: - key: bugprone-assert-side-effect.AssertMacros value: 'ASSERT' - key: bugprone-dangling-handle.HandleClasses value: 'std::basic_string_view;std::experimental::basic_string_view;absl::string_view' - 
key: modernize-use-auto.MinTypeNameLength value: '10' - key: readability-identifier-naming.ClassCase value: 'CamelCase' - key: readability-identifier-naming.EnumCase value: 'CamelCase' - key: readability-identifier-naming.EnumConstantCase value: 'CamelCase' - key: readability-identifier-naming.ParameterCase value: 'lower_case' - key: readability-identifier-naming.PrivateMemberCase value: 'lower_case' - key: readability-identifier-naming.PrivateMemberSuffix value: '_' - key: readability-identifier-naming.StructCase value: 'CamelCase' - key: readability-identifier-naming.TypeAliasCase value: 'CamelCase' - key: readability-identifier-naming.UnionCase value: 'CamelCase' - key: readability-identifier-naming.FunctionCase value: 'camelBack' ================================================ FILE: .devcontainer/.gitignore ================================================ devcontainer.env ================================================ FILE: .devcontainer/Dockerfile ================================================ FROM gcr.io/envoy-ci/envoy-build:b480535e8423b5fd7c102fd30c92f4785519e33a ARG USERNAME=vscode ARG USER_UID=501 ARG USER_GID=$USER_UID ENV BUILD_DIR=/build ENV ENVOY_STDLIB=libstdc++ ENV DEBIAN_FRONTEND=noninteractive RUN apt-get -y update \ && apt-get -y install --no-install-recommends libpython2.7 net-tools psmisc vim 2>&1 \ # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. 
&& groupadd --gid $USER_GID $USERNAME \ && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME -G pcap -d /build \ # [Optional] Add sudo support for non-root user && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME \ && chmod 0440 /etc/sudoers.d/$USERNAME ENV DEBIAN_FRONTEND= ENV PATH=/opt/llvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin ENV CLANG_FORMAT=/opt/llvm/bin/clang-format ================================================ FILE: .devcontainer/README.md ================================================ # Envoy Dev Container (experimental) This directory contains some experimental tools for Envoy Development in [VSCode Remote - Containers](https://code.visualstudio.com/docs/remote/containers). ## How to use Open with VSCode with the Container extension installed. Follow the [official guide](https://code.visualstudio.com/docs/remote/containers) to open this repository directly from GitHub or from checked-out source tree. After opening, run the `Refresh Compilation Database` task to generate compilation database to navigate in source code. This will run partial build of Envoy and may take a while depends on the machine performance. This task is needed to run everytime after: - Changing a BUILD file that add/remove files from a target, changes dependencies - Changing API proto files There are additional tools for VS Code located in [`tools/vscode`](../tools/vscode) directory. ## Advanced Usages ### Using Remote Build Execution Write the following content to `devcontainer.env` and rebuild the container. The key will be persisted in the container's `~/.bazelrc`. 
``` GCP_SERVICE_ACCOUNT_KEY= BAZEL_REMOTE_INSTANCE= BAZEL_REMOTE_CACHE=grpcs://remotebuildexecution.googleapis.com BAZEL_BUILD_EXTRA_OPTIONS=--config=remote-ci --config=remote --jobs= ``` By default the `--config=remote` implies [`--remote_download_toplevel`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--remote_download_toplevel), change this to `minimal` or `all` depending on where you're running the container by adding them to `BAZEL_BUILD_EXTRA_OPTIONS`. ### Disk performance Docker for Mac/Windows is known to have disk performance issue, this makes formatting all files in the container very slow. [Update the mount consistency to 'delegated'](https://code.visualstudio.com/docs/remote/containers-advanced#_update-the-mount-consistency-to-delegated-for-macos) is recommended. ================================================ FILE: .devcontainer/devcontainer.json ================================================ { "name": "Envoy Dev", "dockerFile": "Dockerfile", "runArgs": [ "--user=vscode", "--cap-add=SYS_PTRACE", "--cap-add=NET_RAW", "--cap-add=NET_ADMIN", "--security-opt=seccomp=unconfined", "--volume=${env:HOME}:${env:HOME}", "--volume=envoy-build:/build", // Uncomment next line if you have devcontainer.env // "--env-file=.devcontainer/devcontainer.env" ], "settings": { "terminal.integrated.shell.linux": "/bin/bash", "bazel.buildifierFixOnFormat": true, "clangd.path": "/opt/llvm/bin/clangd", "python.pythonPath": "/usr/bin/python3", "python.formatting.provider": "yapf", "python.formatting.yapfArgs": [ "--style=${workspaceFolder}/tools/code_format/.style.yapf" ], "files.exclude": { "**/.clangd/**": true, "**/bazel-*/**": true }, "files.watcherExclude": { "**/.clangd/**": true, "**/bazel-*/**": true } }, "remoteUser": "vscode", "containerUser": "vscode", "postCreateCommand": ".devcontainer/setup.sh", "extensions": [ "github.vscode-pull-request-github", "zxh404.vscode-proto3", "bazelbuild.vscode-bazel", 
"llvm-vs-code-extensions.vscode-clangd", "vadimcn.vscode-lldb", "webfreak.debug", "ms-python.python" ] } ================================================ FILE: .devcontainer/setup.sh ================================================ #!/usr/bin/env bash . ci/setup_cache.sh trap - EXIT # Don't remove the key file written into a temporary file BAZELRC_FILE=~/.bazelrc bazel/setup_clang.sh /opt/llvm # Use generated toolchain config because we know the base container is the one we're using in RBE. # Not using libc++ here because clangd will raise some tidy issue in libc++ header as of version 9. echo "build --config=rbe-toolchain-clang" >> ~/.bazelrc echo "build ${BAZEL_BUILD_EXTRA_OPTIONS}" | tee -a ~/.bazelrc # Ideally we want this line so bazel doesn't pollute things outside of the devcontainer, but some of # API tooling (proto_sync) depends on symlink like bazel-bin. # TODO(lizan): Fix API tooling and enable this again #echo "build --symlink_prefix=/" >> ~/.bazelrc [[ -n "${BUILD_DIR}" ]] && sudo chown -R "$(id -u):$(id -g)" "${BUILD_DIR}" ================================================ FILE: .gitattributes ================================================ /docs/root/version_history/current.rst merge=union /api/envoy/**/v4alpha/* linguist-generated=true /generated_api_shadow/envoy/** linguist-generated=true /generated_api_shadow/bazel/** linguist-generated=true *.svg binary /test/**/*_corpus/* linguist-generated=true ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false contact_links: - name: "Crash bug" url: https://github.com/envoyproxy/envoy/security/policy about: "Please file any crash bug with envoy-security@googlegroups.com." 
================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.md ================================================ --- name: Feature request about: Suggest an idea for this project title: '' labels: enhancement,triage assignees: '' --- *Title*: *One line description* *Description*: >Describe the the desired behavior, what scenario it enables and how it would be used. [optional *Relevant Links*:] >Any extra documentation required to understand the issue. ================================================ FILE: .github/ISSUE_TEMPLATE/non--crash-security--bug.md ================================================ --- name: Non-{crash,security} bug about: Bugs which are not crashes, DoS or other security issue title: '' labels: bug,triage assignees: '' --- **If you are reporting *any* crash or *any* potential security issue, *do not* open an issue in this repo. Please report the issue via emailing envoy-security@googlegroups.com where the issue will be triaged appropriately.** *Title*: *One line description* *Description*: >What issue is being seen? Describe what should be happening instead of the bug, for example: Envoy should not crash, the expected value isn't returned, etc. *Repro steps*: > Include sample requests, environment, etc. All data and inputs required to reproduce the bug. >**Note**: The [Envoy_collect tool](https://github.com/envoyproxy/envoy/blob/master/tools/envoy_collect/README.md) gathers a tarball with debug logs, config and the following admin endpoints: /stats, /clusters and /server_info. Please note if there are privacy concerns, sanitize the data prior to sharing the tarball/pasting. *Admin and Stats Output*: >Include the admin output for the following endpoints: /stats, /clusters, /routes, /server_info. For more information, refer to the [admin endpoint documentation.](https://www.envoyproxy.io/docs/envoy/latest/operations/admin) >**Note**: If there are privacy concerns, sanitize the data prior to sharing. 
*Config*: >Include the config used to configure Envoy. *Logs*: >Include the access logs and the Envoy logs. >**Note**: If there are privacy concerns, sanitize the data prior to sharing. *Call Stack*: > If the Envoy binary is crashing, a call stack is **required**. Please refer to the [Bazel Stack trace documentation](https://github.com/envoyproxy/envoy/tree/master/bazel#stack-trace-symbol-resolution). ================================================ FILE: .github/ISSUE_TEMPLATE/other.md ================================================ --- name: Other about: Questions, design proposals, tech debt, etc. title: '' labels: triage assignees: '' --- **If you are reporting *any* crash or *any* potential security issue, *do not* open an issue in this repo. Please report the issue via emailing envoy-security@googlegroups.com where the issue will be triaged appropriately.** *Title*: *One line description* *Description*: >Describe the issue. [optional *Relevant Links*:] >Any extra documentation required to understand the issue. ================================================ FILE: .github/stale.yml ================================================ # Configuration for probot-stale - https://github.com/probot/stale # General configuration # Label to use when marking as stale staleLabel: stale # Pull request specific configuration pulls: # Number of days of inactivity before an Issue or Pull Request becomes stale daysUntilStale: 7 # Number of days of inactivity before a stale Issue or Pull Request is closed. # Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale. daysUntilClose: 7 # Comment to post when marking as stale. Set to `false` to disable markComment: > This pull request has been automatically marked as stale because it has not had activity in the last 7 days. It will be closed in 7 days if no further activity occurs. Please feel free to give a status update now, ping for review, or re-open when it's ready. 
Thank you for your contributions! # Comment to post when closing a stale Issue or Pull Request. closeComment: > This pull request has been automatically closed because it has not had activity in the last 14 days. Please feel free to give a status update now, ping for review, or re-open when it's ready. Thank you for your contributions! # Limit the number of actions per hour, from 1-30. Default is 30 limitPerRun: 1 exemptLabels: - no stalebot # Issue specific configuration issues: # TODO: Consider increasing the limitPerRun once we are satisfied with the bot's performance limitPerRun: 1 daysUntilStale: 30 daysUntilClose: 7 markComment: > This issue has been automatically marked as stale because it has not had activity in the last 30 days. It will be closed in the next 7 days unless it is tagged "help wanted" or other activity occurs. Thank you for your contributions. closeComment: > This issue has been automatically closed because it has not had activity in the last 37 days. If this issue is still valid, please ping a maintainer and ask them to label it as "help wanted". Thank you for your contributions. exemptLabels: - help wanted - no stalebot ================================================ FILE: .github/workflows/codeql-daily.yml ================================================ on: schedule: - cron: '0 12 * * 4' jobs: CodeQL-Build: strategy: fail-fast: false # CodeQL runs on ubuntu-latest and windows-latest runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v2 with: # We must fetch at least the immediate parents so that if this is # a pull request then we can checkout the head. fetch-depth: 2 # If this run was triggered by a pull request event, then checkout # the head of the pull request instead of the merge commit. - run: git checkout HEAD^2 if: ${{ github.event_name == 'pull_request' }} # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL uses: github/codeql-action/init@v1 # Override language selection by uncommenting this and choosing your languages with: languages: cpp - name: Install deps shell: bash run: | sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 mkdir -p bin/clang10 cd bin/clang10 wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz tar -xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz --strip-components 1 export PATH=bin/clang10/bin:$PATH - name: Build run: | bazel/setup_clang.sh bin/clang10 bazelisk shutdown bazelisk build -c fastbuild --spawn_strategy=local --discard_analysis_cache --nouse_action_cache --config clang --config libc++ //source/common/http/... - name: Clean Artifacts run: | git clean -xdf - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v1 ================================================ FILE: .github/workflows/codeql-push.yml ================================================ on: push: paths: - 'source/common/**' pull_request: jobs: CodeQL-Build: strategy: fail-fast: false # CodeQL runs on ubuntu-latest and windows-latest runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v2 with: # We must fetch at least the immediate parents so that if this is # a pull request then we can checkout the head. fetch-depth: 2 - name: Get build targets run: | . .github/workflows/get_build_targets.sh echo ::set-env name=BUILD_TARGETS::$(echo $BUILD_TARGETS_LOCAL) # If this run was triggered by a pull request event, then checkout # the head of the pull request instead of the merge commit. - run: git checkout HEAD^2 if: ${{ github.event_name == 'pull_request' }} # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL uses: github/codeql-action/init@v1 # Override language selection by uncommenting this and choosing your languages with: languages: cpp - name: Install deps shell: bash run: | sudo apt-get update && sudo apt-get install libtool cmake automake autoconf make ninja-build curl unzip virtualenv openjdk-11-jdk build-essential libc++1 mkdir -p bin/clang10 cd bin/clang10 wget https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz tar -xf clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz --strip-components 1 export PATH=bin/clang10/bin:$PATH - name: Build run: | bazel/setup_clang.sh bin/clang10 bazelisk shutdown bazelisk build -c fastbuild --spawn_strategy=local --discard_analysis_cache --nouse_action_cache --config clang --config libc++ $BUILD_TARGETS echo -e "Built targets...\n$BUILD_TARGETS" - name: Clean Artifacts run: | git clean -xdf - name: Perform CodeQL Analysis if: env.BUILD_TARGETS != '' uses: github/codeql-action/analyze@v1 ================================================ FILE: .github/workflows/get_build_targets.sh ================================================ #!/bin/bash # This limits the directory that bazel query is going to search under. readonly SEARCH_FOLDER="//source/common/..." set -e -o pipefail function get_targets() { # Comparing the PR HEAD with the upstream master HEAD. git diff --name-only HEAD FETCH_HEAD | while IFS= read -r line do # Only targets under those folders. case "$line" in source/*|include/*) bazel query "rdeps($SEARCH_FOLDER, $line, 1)" 2>/dev/null ;; esac # This chain of commands from left to right are: # 1. Excluding the redundant .cc/.h targets that bazel query emits. # 2. Storing only the unique output. # 3. Limiting to the first 10 targets. done | grep -v '\.cc\|\.h' | sort -u | head -n 10 } # Fetching the upstream HEAD to compare with and stored in FETCH_HEAD. 
git fetch https://github.com/envoyproxy/envoy.git master 2>/dev/null export BUILD_TARGETS_LOCAL=$(echo $(get_targets)) ================================================ FILE: .gitignore ================================================ /bazel-* BROWSE /build /build_* *.bzlc .cache .clangd .classpath .clwb/ /ci/bazel-* compile_commands.json cscope.* .deps .devcontainer.json /docs/landing_source/.bundle /generated .idea/ .project *.pyc **/pyformat SOURCE_VERSION .settings/ *.sw* tags TAGS /test/coverage/BUILD /tools/spelling/.aspell.en.pws .vimrc .vs .vscode clang-tidy-fixes.yaml .gdb_history clang.bazelrc user.bazelrc CMakeLists.txt cmake-build-debug /linux bazel.output.txt *~ ================================================ FILE: .zuul/playbooks/envoy-build/run.yaml ================================================ - hosts: all become: yes roles: - role: config-gcc gcc_version: 7 - role: config-bazel bazel_version: 0.28.1 tasks: - name: Build envoy shell: cmd: | apt update apt-get update apt-get install -y \ libtool \ cmake \ automake \ autoconf \ make \ ninja-build \ curl \ unzip \ virtualenv bazel build //source/exe:envoy-static | tee $LOGS_PATH//bazel.txt cp -r ./bazel-bin $RESULTS_PATH chdir: '{{ zuul.project.src_dir }}' executable: /bin/bash environment: '{{ global_env }}' ================================================ FILE: .zuul.yaml ================================================ - project: name: envoyproxy/envoy check: jobs: - envoy-build-arm64 - job: name: envoy-build-arm64 parent: init-test description: | Envoy build in openlab cluster. run: .zuul/playbooks/envoy-build/run.yaml nodeset: ubuntu-xenial-arm64 voting: false ================================================ FILE: BUILD ================================================ licenses(["notice"]) # Apache 2 exports_files([ "VERSION", ".clang-format", ]) # These two definitions exist to help reduce Envoy upstream core code depending on extensions. 
# To avoid visibility problems, see notes in source/extensions/extensions_build_config.bzl # # TODO(#9953) //test/config_test:__pkg__ should probably be split up and removed. # TODO(#9953) the config fuzz tests should be moved somewhere local and //test/config_test and //test/server removed. package_group( name = "extension_config", packages = [ "//source/exe", "//source/extensions/...", "//test/config_test", "//test/extensions/...", "//test/server", "//test/server/config_validation", ], ) package_group( name = "extension_library", packages = [ "//source/extensions/...", "//test/extensions/...", ], ) ================================================ FILE: CODEOWNERS ================================================ # TODO(zuercher): determine how we want to deal with auto-assignment # By default, @envoyproxy/maintainers own everything. #* @envoyproxy/maintainers # api /api/ @envoyproxy/api-shepherds # access loggers /*/extensions/access_loggers/common @auni53 @zuercher # compression extensions /*/extensions/compression/common/compressor @rojkov @junr03 /*/extensions/compression/gzip/compressor @rojkov @junr03 # csrf extension /*/extensions/filters/http/csrf @dschaller @mattklein123 # original_src http filter extension /*/extensions/filters/http/original_src @snowp @klarose # original_src listener filter extension /*/extensions/filters/listener/original_src @snowp @klarose # original_src common extension extensions/filters/common/original_src @snowp @klarose # dubbo_proxy extension /*/extensions/filters/network/dubbo_proxy @zyfjeff @lizan # rocketmq_proxy extension /*/extensions/filters/network/rocketmq_proxy @aaron-ai @lizhanhui @lizan # thrift_proxy extension /*/extensions/filters/network/thrift_proxy @zuercher @rgs1 # cdn_loop extension /*/extensions/filters/http/cdn_loop @justin-mp @penguingao @alyssawilk # compressor used by http compression filters /*/extensions/filters/http/common/compressor @gsagula @rojkov @dio /*/extensions/filters/http/compressor @rojkov 
@dio # jwt_authn http filter extension /*/extensions/filters/http/jwt_authn @qiwzhang @lizan # grpc_http1_reverse_bridge http filter extension /*/extensions/filters/http/grpc_http1_reverse_bridge @snowp @zuercher # header_to_metadata extension /*/extensions/filters/http/header_to_metadata @rgs1 @zuercher # alts transport socket extension /*/extensions/transport_sockets/alts @htuch @yangminzhu # tls transport socket extension /*/extensions/transport_sockets/tls @PiotrSikora @lizan # proxy protocol socket extension /*/extensions/transport_sockets/proxy_protocol @alyssawilk @wez470 # common transport socket /*/extensions/transport_sockets/common @alyssawilk @wez470 # sni_cluster extension /*/extensions/filters/network/sni_cluster @rshriram @lizan # sni_dynamic_forward_proxy extension /*/extensions/filters/network/sni_dynamic_forward_proxy @rshriram @lizan # tracers.datadog extension /*/extensions/tracers/datadog @cgilmour @palazzem @mattklein123 # tracers.xray extension /*/extensions/tracers/xray @marcomagdy @lavignes @mattklein123 # mysql_proxy extension /*/extensions/filters/network/mysql_proxy @rshriram @venilnoronha @mattklein123 # postgres_proxy extension /*/extensions/filters/network/postgres_proxy @fabriziomello @cpakulski @dio # quic extension /*/extensions/quic_listeners/ @alyssawilk @danzh2010 @mattklein123 @mpwarres @wu-bin # zookeeper_proxy extension /*/extensions/filters/network/zookeeper_proxy @rgs1 @snowp # redis cluster extension /*/extensions/clusters/redis @msukalski @henryyyang @mattklein123 /*/extensions/common/redis @msukalski @henryyyang @mattklein123 # dynamic forward proxy /*/extensions/clusters/dynamic_forward_proxy @mattklein123 @alyssawilk /*/extensions/common/dynamic_forward_proxy @mattklein123 @alyssawilk /*/extensions/filters/http/dynamic_forward_proxy @mattklein123 @alyssawilk # omit_canary_hosts retry predicate /*/extensions/retry/host/omit_canary_hosts @sriduth @snowp # HTTP caching extension /*/extensions/filters/http/cache 
@toddmgreer @jmarantz # aws_iam grpc credentials /*/extensions/grpc_credentials/aws_iam @lavignes @mattklein123 /*/extensions/common/aws @lavignes @mattklein123 # adaptive concurrency limit extension. /*/extensions/filters/http/adaptive_concurrency @tonya11en @mattklein123 # admission control extension. /*/extensions/filters/http/admission_control @tonya11en @mattklein123 # http inspector /*/extensions/filters/listener/http_inspector @yxue @PiotrSikora @lizan # attribute context /*/extensions/filters/common/expr @kyessenov @yangminzhu @lizan # webassembly access logger extensions /*/extensions/access_loggers/wasm @jplevyak @PiotrSikora @lizan # webassembly bootstrap extensions /*/extensions/bootstrap/wasm @jplevyak @PiotrSikora @lizan # webassembly http extensions /*/extensions/filters/http/wasm @jplevyak @PiotrSikora @lizan # webassembly network extensions /*/extensions/filters/network/wasm @jplevyak @PiotrSikora @lizan # webassembly common extension /*/extensions/common/wasm @jplevyak @PiotrSikora @lizan # common matcher /*/extensions/common/matcher @mattklein123 @yangminzhu # common crypto extension /*/extensions/common/crypto @lizan @PiotrSikora @bdecoste /*/extensions/common/proxy_protocol @alyssawilk @wez470 /*/extensions/common/sqlutils @cpakulski @dio /*/extensions/filters/http/grpc_http1_bridge @snowp @jose /*/extensions/filters/http/gzip @gsagula @dio /*/extensions/filters/http/fault @rshriram @alyssawilk /*/extensions/filters/common/fault @rshriram @alyssawilk /*/extensions/filters/http/grpc_json_transcoder @qiwzhang @lizan /*/extensions/filters/http/router @alyssawilk @mattklein123 @snowp /*/extensions/filters/http/ext_authz @gsagula @dio /*/extensions/filters/http/grpc_web @fengli79 @lizan /*/extensions/filters/http/grpc_stats @kyessenov @lizan /*/extensions/filters/http/squash @yuval-k @alyssawilk /*/extensions/filters/common/ext_authz @gsagula @dio /*/extensions/filters/common/original_src @klarose @snowp /*/extensions/filters/listener/tls_inspector 
@piotrsikora @htuch /*/extensions/grpc_credentials/example @wozz @htuch /*/extensions/grpc_credentials/file_based_metadata @wozz @htuch /*/extensions/internal_redirect @alyssawilk @penguingao /*/extensions/stat_sinks/dog_statsd @taiki45 @jmarantz /*/extensions/stat_sinks/hystrix @trabetti @jmarantz /*/extensions/stat_sinks/metrics_service @ramaraochavali @jmarantz # webassembly stat-sink extensions /*/extensions/stat_sinks/wasm @Aakash2017 @jplevyak @lizan /*/extensions/resource_monitors/injected_resource @eziskind @htuch /*/extensions/resource_monitors/common @eziskind @htuch /*/extensions/resource_monitors/fixed_heap @eziskind @htuch /*/extensions/retry/priority @snowp @alyssawilk /*/extensions/retry/priority/previous_priorities @snowp @alyssawilk /*/extensions/retry/host @snowp @alyssawilk /*/extensions/filters/network/http_connection_manager @alyssawilk @mattklein123 /*/extensions/filters/network/ext_authz @gsagula @dio /*/extensions/filters/network/tcp_proxy @alyssawilk @zuercher /*/extensions/filters/network/echo @htuch @alyssawilk /*/extensions/filters/udp/dns_filter @abaptiste @mattklein123 /*/extensions/filters/network/direct_response @kyessenov @zuercher /*/extensions/filters/udp/udp_proxy @mattklein123 @danzh2010 /*/extensions/clusters/aggregate @yxue @snowp # support for on-demand VHDS requests /*/extensions/filters/http/on_demand @dmitri-d @htuch @lambdai /*/extensions/filters/network/local_ratelimit @mattklein123 @junr03 /*/extensions/filters/http/aws_request_signing @rgs1 @derekargueta @mattklein123 @marcomagdy /*/extensions/filters/http/aws_lambda @mattklein123 @marcomagdy @lavignes # Compression /*/extensions/compression/common @junr03 @rojkov /*/extensions/compression/gzip @junr03 @rojkov /*/extensions/filters/http/decompressor @rojkov @dio # Watchdog Extensions /*/extensions/watchdog/profile_action @kbaichoo @antoniovicente /*/extensions/watchdog/abort_action @kbaichoo @antoniovicente # Core upstream code extensions/upstreams/http @alyssawilk 
@snowp @mattklein123 extensions/upstreams/http/http @alyssawilk @snowp @mattklein123 extensions/upstreams/http/tcp @alyssawilk @mattklein123 extensions/upstreams/http/default @alyssawilk @snowp @mattklein123 # OAuth2 extensions/filters/http/oauth2 @rgs1 @derekargueta @snowp # HTTP Local Rate Limit /*/extensions/filters/http/local_ratelimit @rgs1 @mattklein123 /*/extensions/filters/common/local_ratelimit @mattklein123 @rgs1 ================================================ FILE: CODE_OF_CONDUCT.md ================================================ ## Community Code of Conduct Envoy follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). ================================================ FILE: CONTRIBUTING.md ================================================ We welcome contributions from the community. Please read the following guidelines carefully to maximize the chances of your PR being merged. # Communication * Before starting work on a major feature, please reach out to us via GitHub, Slack, email, etc. We will make sure no one else is already working on it and ask you to open a GitHub issue. * A "major feature" is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior. We will use the GitHub issue to discuss the feature and come to agreement. This is to prevent your time being wasted, as well as ours. The GitHub review process for major features is also important so that [organizations with commit access](OWNERS.md) can come to agreement on design. If it is appropriate to write a design document, the document must be hosted either in the GitHub tracking issue, or linked to from the issue and hosted in a world-readable location. * Specifically, if the goal is to add a new [extension](REPO_LAYOUT.md#sourceextensions-layout), please read the [extension policy](GOVERNANCE.md#extension-addition-policy). * Small patches and bug fixes don't need prior communication. 
# Coding style * See [STYLE.md](STYLE.md) # Inclusive language policy The Envoy community has an explicit goal to be inclusive to all. As such, all PRs must adhere to the following guidelines for all code, APIs, and documentation: * The following words and phrases are not allowed: * *Whitelist*: use allowlist instead. * *Blacklist*: use denylist or blocklist instead. * *Master*: use primary instead. * *Slave*: use secondary or replica instead. * Documentation should be written in an inclusive style. The [Google developer documentation](https://developers.google.com/style/inclusive-documentation) contains an excellent reference on this topic. * The above policy is not considered definitive and may be amended in the future as industry best practices evolve. Additional comments on this topic may be provided by maintainers during code review. # Breaking change policy Both API and implementation stability are important to Envoy. Since the API is consumed by clients beyond Envoy, it has a distinct set of [versioning guidelines](api/API_VERSIONING.md). Below, we articulate the Envoy implementation stability rules, which operate within the context of the API versioning guidelines: * Features may be marked as deprecated in a given versioned API at any point in time, but this may only be done when a replacement implementation and configuration path is available in Envoy on master. Deprecators must implement a conversion from the deprecated configuration to the latest `vNalpha` (with the deprecated field) that Envoy uses internally. A field may be deprecated if this tool would be able to perform the conversion. For example, removing a field to describe HTTP/2 window settings is valid if a more comprehensive HTTP/2 protocol options field is being introduced to replace it. The PR author deprecating the old configuration is responsible for updating all tests and canonical configuration, or guarding them with the `DEPRECATED_FEATURE_TEST()` macro. 
This will be validated by the `bazel.compile_time_options` target, which will hard-fail when deprecated configuration is used. The majority of tests and configuration for a feature should be expressed in terms of the latest Envoy internal configuration (i.e. `vNalpha`), only a minimal number of tests necessary to validate configuration translation should be guarded via the `DEPRECATED_FEATURE_TEST()` macro. * We will delete deprecated configuration across major API versions. E.g. a field marked deprecated in v2 will be removed in v3. * Unless the community and Envoy maintainer team agrees on an exception, during the first release cycle after a feature has been deprecated, use of that feature will cause a logged warning, and incrementing the [runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime#statistics) `runtime.deprecated_feature_use` stat. During the second release cycle, use of the deprecated configuration will cause a configuration load failure, unless the feature in question is explicitly overridden in [runtime](https://www.envoyproxy.io/docs/envoy/latest/configuration/operations/runtime#using-runtime-overrides-for-deprecated-features) config ([example](configs/using_deprecated_config.v2.yaml)). Finally, following the deprecation of the API major version where the field was first marked deprecated, the entire implementation code will be removed from the Envoy implementation. * This policy means that organizations deploying master should have some time to get ready for breaking changes at the next major API version. This is typically a window of at least 12 months or until the organization moves to the next major API version. * The breaking change policy also applies to source level extensions (e.g., filters). Code that conforms to the public interface documentation should continue to compile and work within the deprecation window. 
Within this window, a warning of deprecation should be carefully logged (some features might need rate limiting for logging this). We make no guarantees about code or deployments that rely on undocumented behavior. * All deprecations/breaking changes will be clearly listed in the [version history](docs/root/version_history/). * High risk deprecations/breaking changes may be announced to the [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list but by default it is expected the multi-phase warn-by-default/fail-by-default is sufficient to warn users to move away from deprecated features. # Submitting a PR * Fork the repo. * In your local repo, install the git hooks that implement various important pre-commit and pre-push checks: ``` ./support/bootstrap ``` Please see [support/README.md](support/README.md) for more information on these hooks. * Create your PR. * Tests will automatically run for you. * We will **not** merge any PR that is not passing tests. * PRs are expected to have 100% test coverage for added code. This can be verified with a coverage build. If your PR cannot have 100% coverage for some reason please clearly explain why when you open it. * Any PR that changes user-facing behavior **must** have associated documentation in [docs](docs) as well as [release notes](docs/root/version_history/current.rst). API changes should be documented inline with protos as per the [API contribution guidelines](api/CONTRIBUTING.md). If a change applies to multiple sections of the release notes, it should be noted in the first (most important) section that applies. For instance, a bug fix that introduces incompatible behavior should be noted in `Incompatible Behavior Changes` but not in `Bug Fixes`. * All code comments and documentation are expected to have proper English grammar and punctuation. If you are not a fluent English speaker (or a bad writer ;-)) please let us know and we will try to find some help but there are no guarantees. 
* Your PR title should be descriptive, and generally start with a subsystem name followed by a colon. Examples: * "docs: fix grammar error" * "http conn man: add new feature" * Your PR commit message will be used as the commit message when your PR is merged. You should update this field if your PR diverges during review. * Your PR description should have details on what the PR does. If it fixes an existing issue it should end with "Fixes #XXX". * If your PR is co-authored or based on an earlier PR from another contributor, please attribute them with `Co-authored-by: name <name@example.com>`. See GitHub's [multiple author guidance](https://help.github.com/en/github/committing-changes-to-your-project/creating-a-commit-with-multiple-authors) for further details. * When all of the tests are passing and all other conditions described herein are satisfied, a maintainer will be assigned to review and merge the PR. * Once you submit a PR, *please do not rebase it*. It's much easier to review if subsequent commits are new commits and/or merges. We squash rebase the final merged commit so the number of commits you have in the PR don't matter. * We expect that once a PR is opened, it will be actively worked on until it is merged or closed. We reserve the right to close PRs that are not making progress. This is generally defined as no changes for 7 days. Obviously PRs that are closed due to lack of activity can be reopened later. Closing stale PRs helps us to keep on top of all of the work currently in flight. * If a commit deprecates a feature, the commit message must mention what has been deprecated. Additionally, the [version history](docs/root/version_history/current.rst) must be updated with relevant RST links for fields and messages as part of the commit. * Please consider joining the [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev) mailing list. 
* If your PR involves any changes to [envoy-filter-example](https://github.com/envoyproxy/envoy-filter-example) (for example making a new branch so that CI can pass) it is your responsibility to follow through with merging those changes back to master once the CI dance is done. * If your PR is a high risk change, the reviewer may ask that you runtime guard it. See the section on runtime guarding below. # Runtime guarding Some changes in Envoy are deemed worthy of runtime guarding. Instead of just replacing old code with new code, both code paths are supported for between one Envoy release (if it is guarded due to performance concerns) and a full deprecation cycle (if it is a high risk behavioral change). Generally as a community we try to guard both high risk changes (major refactors such as replacing Envoy's buffer implementation) and most user-visible non-config-guarded changes to protocol processing (for example additions or changes to HTTP headers or how HTTP is serialized out) for non-alpha features. Feel free to tag @envoyproxy/maintainers if you aren't sure if a given change merits runtime guarding. The canonical way to runtime guard a feature is ``` if (Runtime::runtimeFeatureEnabled("envoy.reloadable_features.my_feature_name")) { [new code path] } else { [old_code_path] } ``` Runtime guarded features named with the "envoy.reloadable_features." prefix must be safe to flip true or false on running Envoy instances. In some situations it may make more sense to latch the value in a member variable on class creation, for example: ``` bool use_new_code_path_ = Runtime::runtimeFeatureEnabled("envoy.reloadable_features.my_feature_name") ``` This should only be done if the lifetime of the object in question is relatively short compared to the lifetime of most Envoy instances, i.e. 
latching state on creation of the Http::ConnectionManagerImpl or all Network::ConnectionImpl classes, to ensure that the new behavior will be exercised as the runtime value is flipped, and that the old behavior will trail off over time. Runtime guarded features may either set true (running the new code by default) in the initial PR, after a testing interval, or during the next release cycle, at the PR author's and reviewing maintainer's discretion. Generally all runtime guarded features will be set true when a release is cut. Old code paths for refactors can be cleaned up after a release and there has been some production run time. Old code for behavioral changes will be deprecated after six months. Runtime features are set true by default by inclusion in [source/common/runtime/runtime_features.cc](https://github.com/envoyproxy/envoy/blob/master/source/common/runtime/runtime_features.cc) There are four suggested options for testing new runtime features: 1. Create a per-test Runtime::LoaderSingleton as done in [DeprecatedFieldsTest.IndividualFieldDisallowedWithRuntimeOverride](https://github.com/envoyproxy/envoy/blob/master/test/common/protobuf/utility_test.cc) 2. Create a [parameterized test](https://github.com/google/googletest/blob/master/googletest/docs/advanced.md#how-to-write-value-parameterized-tests) where the set up of the test sets the new runtime value explicitly to GetParam() as outlined in (1). 3. Set up integration tests with custom runtime defaults as documented in the [integration test README](https://github.com/envoyproxy/envoy/blob/master/test/integration/README.md) 4. Run a given unit test with the new runtime value explicitly set true or false as done for [runtime_flag_override_test](https://github.com/envoyproxy/envoy/blob/master/test/common/runtime/BUILD) Runtime code is held to the same standard as regular Envoy code, so both the old path and the new should have 100% coverage both with the feature defaulting true and false. 
# PR review policy for maintainers * Typically we try to turn around reviews within one business day. * See [OWNERS.md](OWNERS.md) for the current list of maintainers. * It is generally expected that a senior maintainer should review every PR. * It is also generally expected that a "domain expert" for the code the PR touches should review the PR. This person does not necessarily need to have commit access. * The previous two points generally mean that every PR should have two approvals. (Exceptions can be made by the senior maintainers). * The above rules may be waived for PRs which only update docs or comments, or trivial changes to tests and tools (where trivial is decided by the maintainer in question). * In general, we should also attempt to make sure that at least one of the approvals is *from an organization different from the PR author.* E.g., if Lyft authors a PR, at least one approver should be from an organization other than Lyft. This helps us make sure that we aren't putting organization specific shortcuts into the code. * If there is a question on who should review a PR please discuss in Slack. * Anyone is welcome to review any PR that they want, whether they are a maintainer or not. * Please make sure that the PR title, commit message, and description are updated if the PR changes significantly during review. * Please **clean up the title and body** before merging. By default, GitHub fills the squash merge title with the original title, and the commit body with every individual commit from the PR. The maintainer doing the merge should make sure the title follows the guidelines above and should overwrite the body with the original commit message from the PR (cleaning it up if necessary) while preserving the PR author's final DCO sign-off. * If a PR includes a deprecation/breaking change, notification should be sent to the [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce) email list. 
# Adding new extensions For developers adding a new extension, one can take an existing extension as the starting point. Extension configuration should be located in a directory structure like `api/envoy/extensions/area/plugin/`, for example `api/envoy/extensions/access_loggers/file/` The code for the extension should be located under the equivalent `source/extensions/area/plugin`, and include an *envoy_cc_extension* with the configuration and tagged with the appropriate security posture, and an *envoy_cc_library* with the code. More details on how to add a new extension API can be found [here](api/STYLE.md#adding-an-extension-configuration-to-the-api): Other changes will likely include * Editing [source/extensions/extensions_build_config.bzl](source/extensions/extensions_build_config.bzl) to include the new extensions * Editing [docs/root/api-v3/config/config.rst](docs/root/api-v3/config/config.rst) to add area/area * Adding `docs/root/api-v3/config/area/area.rst` to add a table of contents for the API docs * Adding `source/extensions/area/well_known_names.h` for registered plugins # DCO: Sign your work Envoy ships commit hooks that allow you to auto-generate the DCO signoff line if it doesn't exist when you run `git commit`. Simply navigate to the Envoy project root and run: ```bash ./support/bootstrap ``` From here, simply commit as normal, and you will see the signoff at the bottom of each commit. The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below (from [developercertificate.org](https://developercertificate.org/)): ``` Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
660 York Street, Suite 102, San Francisco, CA 94110 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` then you just add a line to every git commit message: Signed-off-by: Joe Smith <joe.smith@example.com> using your real name (sorry, no pseudonyms or anonymous contributions.) You can add the sign off when creating the git commit via `git commit -s`. If you want this to be automatic you can set up some aliases: ```bash git config --add alias.amend "commit -s --amend" git config --add alias.c "commit -s" ``` ## Fixing DCO If your PR fails the DCO check, it's necessary to fix the entire commit history in the PR. Best practice is to [squash](https://gitready.com/advanced/2009/02/10/squashing-commits-with-rebase.html) the commit history to a single commit, append the DCO sign-off as described above, and [force push](https://git-scm.com/docs/git-push#git-push---force). 
For example, if you have 2 commits in your history: ```bash git rebase -i HEAD^^ (interactive squash + DCO append) git push origin -f ``` Note, that in general rewriting history in this way is a hindrance to the review process and this should only be done to correct a DCO mistake. ## Triggering CI re-run without making changes To rerun failed tasks in Circle-CI, add a comment with the line ``` /retest-circle ``` in it. This should rebuild only the failed tasks. To rerun failed tasks in Azure pipelines, add a comment with the line ``` /retest ``` in it. This should rebuild only the failed tasks. Sometimes tasks will be stuck in CI and won't be marked as failed, which means the above command won't work. Should this happen, pushing an empty commit should re-run all the CI tasks. Consider adding an alias into your `.gitconfig` file: ``` [alias] kick-ci = !"git commit -s --allow-empty -m 'Kick CI' && git push" ``` Once you add this alias you can issue the command `git kick-ci` and the PR will be sent back for a retest. ================================================ FILE: DCO ================================================ Developer Certificate of Origin Version 1.1 Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 1 Letterman Drive Suite D4700 San Francisco, CA, 94129 Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 
Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ================================================ FILE: DEPENDENCY_POLICY.md ================================================ # Envoy External Dependency Policy Envoy has an evolving policy on external dependencies, tracked at https://github.com/envoyproxy/envoy/issues/10471. This will become stricter over time, below we detail the policy as it currently applies. ## External dependencies dashboard The list of external dependencies in Envoy with their current version is available at https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/external_deps ## Declaring external dependencies In general, all external dependencies for the Envoy proxy binary build and test should be declared in either [bazel/repository_locations.bzl](bazel/repository_locations.bzl) or [api/bazel/repository_locations.bzl](api/bazel/repository_locations.bzl), unless listed under [policy exceptions](#policy-exceptions). 
An example entry for the `nghttp2` dependency is: ```python com_github_nghttp2_nghttp2 = dict( project_name = "Nghttp2", project_desc = "Implementation of HTTP/2 and its header compression ...", project_url = "https://nghttp2.org", version = "1.41.0", sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8", strip_prefix = "nghttp2-{version}", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"], use_category = ["dataplane"], last_updated = "2020-06-02", cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), ``` Dependency declarations must: * Provide a meaningful project name and URL. * State the version in the `version` field. String interpolation should be used in `strip_prefix` and `urls` to reference the version. If you need to reference version `X.Y.Z` as `X_Y_Z`, this may appear in a string as `{underscore_version}`, similarly for `X-Y-Z` you can use `{dash_version}`. * Versions should prefer release versions over master branch GitHub SHA tarballs. A comment is necessary if the latter is used. This comment should contain the reason that a non-release version is being used. * Provide accurate entries for `use_category`. Please think carefully about whether there are data or control plane implications of the dependency. * Reflect the date (YYYY-MM-DD) at which they were last updated in the `last_updated` field. This date is preferably the date at which the PR is created. * CPEs are compulsory for all dependencies that are not purely build/test. [CPEs](https://en.wikipedia.org/wiki/Common_Platform_Enumeration) provide metadata that allow us to correlate with related CVEs in dashboards and other tooling, and also provide a machine consumable join key. You can consult the latest [CPE dictionary](https://nvd.nist.gov/products/cpe) to find a CPE for a dependency. `"N/A"` should only be used if no CPE for the project is available in the CPE database. 
CPEs should be _versionless_ with a `:*` suffix, since the version can be computed from `version`. When build or test code references Python modules, they should be imported via `pip3_import` in [bazel/repositories_extra.bzl](bazel/repositories_extra.bzl). Python modules should not be listed in `repository_locations.bzl` entries. `requirements.txt` files for Python dependencies must pin to exact versions, e.g. `PyYAML==5.3.1` and ideally also include a [SHA256 checksum](https://davidwalsh.name/hashin). Pure developer tooling and documentation builds may reference Python via standalone `requirements.txt`, following the above policy. ## New external dependencies * Any new dependency on the Envoy data or control plane that impacts Envoy core (i.e. is not specific to a single non-core extension) must be cleared with the Envoy security team, please file an issue and tag [@envoyproxy/security-team](https://github.com/orgs/envoyproxy/teams/security-team). While policy is still [evolving](https://github.com/envoyproxy/envoy/issues/10471), criteria that will be used in evaluation include: * Does the project have release versions? How often do releases happen? * Does the project have a security vulnerability disclosure process and contact details? * Does the project have effective governance, e.g. multiple maintainers, a governance policy? * Does the project have a code review culture? Are patches reviewed by independent maintainers prior to merge? * Does the project enable mandatory GitHub 2FA for contributors? * Does the project have evidence of high test coverage, fuzzing, static analysis (e.g. CodeQL), etc.? * Dependencies for extensions that are tagged as `robust_to_untrusted_downstream` or `robust_to_untrusted_downstream_and_upstream` should be sensitive to the same set of concerns as the core data plane. ## Maintaining existing dependencies We rely on community volunteers to help track the latest versions of dependencies. 
On a best effort basis: * Core Envoy dependencies will be updated by the Envoy maintainers/security team. * Extension [CODEOWNERS](CODEOWNERS) should update extension specific dependencies. Where possible, we prefer the latest release version for external dependencies, rather than master branch GitHub SHA tarballs. ## Dependency patches Occasionally it is necessary to introduce an Envoy-side patch to a dependency in a `.patch` file. These are typically applied in [bazel/repositories.bzl](bazel/repositories.bzl). Our policy on this is as follows: * Patch files impede dependency updates. They are expedient at creation time but are a maintenance penalty. They reduce the velocity and increase the effort of upgrades in response to security vulnerabilities in external dependencies. * No patch will be accepted without a sincere and sustained effort to upstream the patch to the dependency's canonical repository. * There should exist a plan-of-record, filed as an issue in Envoy or the upstream GitHub tracking elimination of the patch. * Every patch must have comments at its point-of-use in [bazel/repositories.bzl](bazel/repositories.bzl) providing a rationale and detailing the tracking issue. ## Policy exceptions The following dependencies are exempt from the policy: * Any developer-only facing tooling or the documentation build. * Transitive build time dependencies, e.g. Go projects vendored into [protoc-gen-validate](https://github.com/envoyproxy/protoc-gen-validate). ================================================ FILE: DEPRECATED.md ================================================ # DEPRECATED The [deprecated log](https://www.envoyproxy.io/docs/envoy/latest/version_history/version_history) for each version can be found in the official Envoy developer documentation. ================================================ FILE: DEVELOPER.md ================================================ # Developer documentation Envoy is built using the Bazel build system. 
CircleCI builds, tests, and runs coverage against all pull requests and the master branch. To get started building Envoy locally, see the [Bazel quick start](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#quick-start-bazel-build-for-developers). To run tests, there are Bazel [targets](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#testing-envoy-with-bazel) for Google Test. To generate a coverage report, there is a [coverage build script](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#coverage-builds). If you plan to contribute to Envoy, you may find it useful to install the Envoy [development support toolchain](https://github.com/envoyproxy/envoy/blob/master/support/README.md), which helps automate parts of the development process, particularly those involving code review. Below is a list of additional documentation to aid the development process: - [General build and installation documentation](https://www.envoyproxy.io/docs/envoy/latest/install/install) - [Building and testing Envoy with Bazel](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md) - [Managing external dependencies with Bazel](https://github.com/envoyproxy/envoy/blob/master/bazel/EXTERNAL_DEPS.md) - [Guide to Envoy Bazel rules (managing `BUILD` files)](https://github.com/envoyproxy/envoy/blob/master/bazel/DEVELOPER.md) - [Using Docker for building and testing](https://github.com/envoyproxy/envoy/tree/master/ci) - [Guide to contributing to Envoy](https://github.com/envoyproxy/envoy/blob/master/CONTRIBUTING.md) - [Overview of Envoy's testing frameworks](https://github.com/envoyproxy/envoy/blob/master/test/README.md) - [Overview of how to write integration tests for new code](https://github.com/envoyproxy/envoy/blob/master/test/integration/README.md) - [Envoy filter example project (how to consume and extend Envoy as a submodule)](https://github.com/envoyproxy/envoy-filter-example) - [Performance testing Envoy with 
`tcmalloc`/`pprof`](https://github.com/envoyproxy/envoy/blob/master/bazel/PPROF.md) And some documents on components of Envoy architecture: - [Envoy flow control](https://github.com/envoyproxy/envoy/blob/master/source/docs/flow_control.md) - [Envoy's subset load balancer](https://github.com/envoyproxy/envoy/blob/master/source/docs/subset_load_balancer.md) ================================================ FILE: EXTENSION_POLICY.md ================================================ # Envoy Extension Policy ## Quality requirements All extensions contained in the main Envoy repository will be held to the same quality bar as the core Envoy code. This includes coding style, code reviews, test coverage, etc. In the future we may consider creating a sandbox repository for extensions that are not compiled/tested by default and held to a lower quality standard, but that is out of scope currently. ## Adding new extensions The following procedure will be used when proposing new extensions for inclusion in the repository: 1. A GitHub issue should be opened describing the proposed extension as with any major feature proposal. 2. All extensions must be sponsored by an existing maintainer. Sponsorship means that the maintainer will shepherd the extension through design/code reviews. Maintainers can self-sponsor extensions if they are going to write them, shepherd them, and maintain them. Sponsorship serves two purposes: * It ensures that the extension will ultimately meet the Envoy quality bar. * It makes sure that incentives are aligned and that extensions are not added to the repo without sufficient thought put into future maintenance. *If sponsorship cannot be found from an existing maintainer, an organization can consider [doing the work to become a maintainer](./GOVERNANCE.md#process-for-becoming-a-maintainer) in order to be able to self-sponsor extensions.* 3. Each extension must have two reviewers proposed for reviewing PRs to the extension. 
Neither reviewer is required to be a senior maintainer. Existing maintainers (including the sponsor) and other contributors can count towards this number. The initial reviewers will be codified in the [CODEOWNERS](./CODEOWNERS) file for long term maintenance. These reviewers can be swapped out as needed. 4. Any extension added via this process becomes a full part of the repository. This means that any API breaking changes in the core code will be automatically fixed as part of the normal PR process by other contributors. 5. Any new dependencies added for this extension must comply with [DEPENDENCY_POLICY.md](DEPENDENCY_POLICY.md), please follow the steps detailed there. ## Removing existing extensions As stated in the previous section, once an extension becomes part of the repository it will be maintained by the collective set of Envoy contributors as needed. However, if an extension has known issues that are not being rectified by the original sponsor and reviewers or new contributors that are willing to step into the role of extension owner, a [vote of the maintainers](./GOVERNANCE.md#conflict-resolution-and-voting) can be called to remove the extension from the repository. ## Extension pull request reviews Extension PRs must not modify core Envoy code. In the event that an extension requires changes to core Envoy code, those changes should be submitted as a separate PR and will undergo the normal code review process, as documented in the [contributor's guide](./CONTRIBUTING.md). Extension PRs must be approved by at least one sponsoring maintainer and an extension reviewer. These may be a single individual, but it is always preferred to have multiple reviewers when feasible. In the event that the Extension PR author is a sponsoring maintainer and no other sponsoring maintainer is available, another maintainer may be enlisted to perform a minimal review for style and common C++ anti-patterns. The Extension PR must still be approved by a non-maintainer reviewer. 
## Extension stability and security posture Every extension is expected to be tagged with a `status` and `security_posture` in its `envoy_cc_extension` rule. The `status` is one of: * `stable`: The extension is stable and is expected to be production usable. This is the default if no `status` is specified. * `alpha`: The extension is functional but has not had substantial production burn time, use only with this caveat. * `wip`: The extension is work-in-progress. Functionality is incomplete and it is not intended for production use. The extension status may be adjusted by the extension [CODEOWNERS](./CODEOWNERS) and/or Envoy maintainers based on an assessment of the above criteria. Note that the status of the extension reflects the implementation status. It is orthogonal to the API stability, for example, an extension with configuration `envoy.foo.v3alpha.Bar` might have a `stable` implementation and `envoy.foo.v3.Baz` can have a `wip` implementation. The `security_posture` is one of: * `robust_to_untrusted_downstream`: The extension is hardened against untrusted downstream traffic. It assumes that the upstream is trusted. * `robust_to_untrusted_downstream_and_upstream`: The extension is hardened against both untrusted downstream and upstream traffic. * `requires_trusted_downstream_and_upstream`: The extension is not hardened and should only be used in deployments where both the downstream and upstream are trusted. * `unknown`: This is functionally equivalent to `requires_trusted_downstream_and_upstream`, but acts as a placeholder to allow us to identify extensions that need classifying. * `data_plane_agnostic`: Not relevant to data plane threats, e.g. stats sinks. An assessment of a robust security posture for an extension is subject to the following guidelines: * Does the extension have fuzz coverage? 
If it's only receiving fuzzing courtesy of the generic listener/network/HTTP filter fuzzers, does it have a dedicated fuzzer for any parts of the code that would benefit? * Does the extension have unbounded internal buffering? Does it participate in flow control via watermarking as needed? * Does the extension have at least one deployment with live untrusted traffic for a period of time, N months? * Does the extension rely on dependencies that meet our [extension maturity model](https://github.com/envoyproxy/envoy/issues/10471)? * Is the extension reasonable to audit by Envoy security team? * Is the extension free of obvious scary things, e.g. `memcpy`, does it have gnarly parsing code, etc? * Does the extension have active [CODEOWNERS](CODEOWNERS) who are willing to vouch for the robustness of the extension? * Is the extension absent a [low coverage exception](https://github.com/envoyproxy/envoy/blob/master/test/per_file_coverage.sh#L5)? The current stability and security posture of all extensions can be seen [here](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/threat_model#core-and-extensions). ================================================ FILE: GOVERNANCE.md ================================================ # Process for becoming a maintainer ## Your organization is not yet a maintainer * Express interest to the senior maintainers that your organization is interested in becoming a maintainer. Becoming a maintainer generally means that you are going to be spending substantial time (>25%) on Envoy for the foreseeable future. You should have domain expertise and be extremely proficient in C++. Ultimately your goal is to become a senior maintainer that will represent your organization. * We will expect you to start contributing increasingly complicated PRs, under the guidance of the existing senior maintainers. * We may ask you to do some PRs from our backlog. 
* As you gain experience with the code base and our standards, we will ask you to do code reviews for incoming PRs (i.e., all maintainers are expected to shoulder a proportional share of community reviews). * After a period of approximately 2-3 months of working together and making sure we see eye to eye, the existing senior maintainers will confer and decide whether to grant maintainer status or not. We make no guarantees on the length of time this will take, but 2-3 months is the approximate goal. ## Your organization is currently a maintainer * First decide whether your organization really needs more people with maintainer access. Valid reasons are "blast radius", a large organization that is working on multiple unrelated projects, etc. * Contact a senior maintainer for your organization and express interest. * Start doing PRs and code reviews under the guidance of your senior maintainer. * After a period of 1-2 months the existing senior maintainers will discuss granting "standard" maintainer access. * "Standard" maintainer access can be upgraded to "senior" maintainer access after another 1-2 months of work and another conference of the existing senior committers. ## Maintainer responsibilities * Monitor email aliases. * Monitor Slack (delayed response is perfectly acceptable). * Triage GitHub issues and perform pull request reviews for other maintainers and the community. The areas of specialization listed in [OWNERS.md](OWNERS.md) can be used to help with routing an issue/question to the right person. * Triage build issues - file issues for known flaky builds or bugs, and either fix or find someone to fix any master build breakages. * During GitHub issue triage, apply all applicable [labels](https://github.com/envoyproxy/envoy/labels) to each new issue. Labels are extremely useful for future issue follow up. Which labels to apply is somewhat subjective so just use your best judgment. 
A few of the most important labels that are not self-explanatory are: * **beginner**: Mark any issue that can reasonably be accomplished by a new contributor with this label. * **help wanted**: Unless it is immediately obvious that someone is going to work on an issue (and if so assign it), mark it help wanted. * **question**: If it's unclear if an issue is immediately actionable, mark it with the question label. Questions are easy to search for and close out at a later time. Questions can be promoted to other issue types once it's clear they are actionable (at which point the question label should be removed). * Make sure that ongoing PRs are moving forward at the right pace, or close them if they are not. * Participate when called upon in the [security release process](SECURITY.md). Note that although this should be a rare occurrence, if a serious vulnerability is found, the process may take up to several full days of work to implement. This reality should be taken into account when discussing time commitment obligations with employers. * In general continue to be willing to spend at least 25% of one's time working on Envoy (~1.25 business days per week). * We currently maintain an "on-call" rotation within the maintainers. Each on-call is 1 week. Although all maintainers are welcome to perform all of the above tasks, it is the on-call maintainer's responsibility to triage incoming issues/questions and marshal ongoing work forward. To reiterate, it is *not* the responsibility of the on-call maintainer to answer all questions and do all reviews, but it is their responsibility to make sure that everything is being actively covered by someone. * The on-call rotation is tracked at Opsgenie. 
The calendar is visible [here](https://calendar.google.com/calendar/embed?src=d6glc0l5rc3v235q9l2j29dgovh3dn48%40import.calendar.google.com&ctz=America%2FNew_York) or you can subscribe to the iCal feed [here](webcal://kubernetes.app.opsgenie.com/webapi/webcal/getRecentSchedule?webcalToken=39dd1a892faa8d0d689f889b9d09ae787355ddff894396546726a5a02bac5b26&scheduleId=a3505963-c064-4c97-8865-947dfcb06060) ## Cutting a release * We do releases every 3 months, at the end of each quarter, as described in the [release schedule](RELEASES.md#release-schedule). * Take a look at open issues tagged with the current release, by [searching](https://github.com/envoyproxy/envoy/issues) for "is:open is:issue milestone:[current milestone]" and either hold off until they are fixed or bump them to the next milestone. * Begin marshalling the ongoing PR flow in this repo. Ask maintainers to hold off merging any particularly risky PRs until after the release is tagged. This is because we aim for master to be at release candidate quality at all times. * Do a final check of the [release notes](docs/root/version_history/current.rst): * Make any needed corrections (grammar, punctuation, formatting, etc.). * Check to see if any security/stable version release notes are duplicated in the major version release notes. These should not be duplicated. * If the "Deprecated" section is empty, delete it. * Remove the "Pending" tags and add dates to the top of the [release notes for this version](docs/root/version_history/current.rst). * Switch the [VERSION](VERSION) from a "dev" variant to a final variant. E.g., "1.6.0-dev" to "1.6.0". * Update the [RELEASES](RELEASES.md) doc with the relevant dates. * Get a review and merge. * Wait for tests to pass on [master](https://dev.azure.com/cncf/envoy/_build). * Create a [tagged release](https://github.com/envoyproxy/envoy/releases). The release should start with "v" and be followed by the version number. E.g., "v1.6.0". 
**This must match the [VERSION](VERSION).** * From the envoy [landing page](https://github.com/envoyproxy/envoy) use the branch drop-down to create a branch from the tagged release, e.g. "release/v1.6". It will be used for the [stable releases](RELEASES.md#stable-releases). * Monitor the AZP tag build to make sure that the final docker images get pushed along with the final docs. The final documentation will end up in the [envoyproxy.github.io repository](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy). * Update the website ([example PR](https://github.com/envoyproxy/envoyproxy.github.io/pull/148)) for the new release. * Craft a witty/uplifting email and send it to all the email aliases including envoy-announce@. * Make sure we tweet the new release: either have Matt do it or email social@cncf.io and ask them to do an Envoy account post. * Do a new PR to setup the next version * Update [VERSION](VERSION) to the next development release. E.g., "1.7.0-dev". * `git mv docs/root/version_history/current.rst docs/root/version_history/v1.6.0.rst`, filling in the previous release version number in the filename, and add an entry for the new file in the `toctree` in [version_history.rst](docs/root/version_history/version_history.rst). * Create a new "current" version history file at the [release notes](docs/root/version_history/current.rst) for the following version. E.g., "1.7.0 (pending)". 
Use this text as the template for the new file: ``` 1.7.0 (Pending) =============== Incompatible Behavior Changes ----------------------------- *Changes that are expected to cause an incompatibility if applicable; deployment changes are likely required* Minor Behavior Changes ---------------------- *Changes that may cause incompatibilities for some users, but should not for most* Bug Fixes --------- *Changes expected to improve the state of the world and are unlikely to have negative effects* Removed Config or Runtime ------------------------- *Normally occurs at the end of the* :ref:`deprecation period <deprecated>` New Features ------------ Deprecated ---------- ``` * Run the deprecate_versions.py script (e.g. `sh tools/deprecate_version/deprecate_version.sh`) to file tracking issues for runtime guarded code which can be removed. * Check source/common/runtime/runtime_features.cc and see if any runtime guards in disabled_runtime_features should be reassessed, and ping on the relevant issues. ## When does a maintainer lose maintainer status If a maintainer is no longer interested or cannot perform the maintainer duties listed above, they should volunteer to be moved to emeritus status. In extreme cases this can also occur by a vote of the maintainers per the voting process below. # xDS API shepherds The [xDS API shepherds](https://github.com/orgs/envoyproxy/teams/api-shepherds) are responsible for approving any PR that modifies the [api/](api/) tree. They ensure that API [style](api/STYLE.md) and [versioning](api/API_VERSIONING.md) policies are enforced and that a consistent approach is taken towards API evolution. The xDS API shepherds are also the xDS API maintainers; they work collaboratively with the community to drive the xDS API roadmap and review major proposed design changes. The API shepherds are intended to be representative of xDS client and control plane developers who are actively working on xDS development and evolution. 
As with maintainers, an API shepherd should be spending at least 25% of their time working on xDS developments and expect to be active in this space in the near future. API shepherds are expected to take on API shepherd review load and participate in meetings. They should be active on Slack `#xds` and responsive to GitHub issues and PRs on which they are tagged. The API shepherds are distinct to the [UDPA working group](https://github.com/cncf/udpa/blob/master/README.md), which aims to evolve xDS directionally towards a universal dataplane API. API shepherds are responsible for the execution of the xDS day-to-day and guiding xDS implementation changes. Proposals from UDPA-WG will be aligned with the xDS API shepherds to ensure that xDS is heading towards the UDPA goal. xDS API shepherds operate under the [envoyproxy](https://github.com/envoyproxy) organization but are expected to keep in mind the needs of all xDS clients (currently Envoy and gRPC, but we are aware of other in-house implementations) and the goals of UDPA-WG. If you wish to become an API shepherd and satisfy the above criteria, please contact an existing API shepherd. We will factor in PR and review history to determine if the above API shepherd requirements are met. We may ask you to shadow an existing API shepherd for a period of time to build confidence in consistent application of the API guidelines to PRs. # Extension addition policy Adding new [extensions](REPO_LAYOUT.md#sourceextensions-layout) has a dedicated policy. Please see [this](./EXTENSION_POLICY.md) document for more information. # External dependency policy Adding new external dependencies has a dedicated policy. Please see [this](DEPENDENCY_POLICY.md) document for more information. # Conflict resolution and voting In general, we prefer that technical issues and maintainer membership are amicably worked out between the persons involved. If a dispute cannot be decided independently, the maintainers can be called in to decide an issue. 
If the maintainers themselves cannot decide an issue, the issue will be resolved by voting. The voting process is a simple majority in which each senior maintainer receives two votes and each normal maintainer receives one vote. # Adding new projects to the envoyproxy GitHub organization New projects will be added to the envoyproxy organization via GitHub issue discussion in one of the existing projects in the organization. Once sufficient discussion has taken place (~3-5 business days but depending on the volume of conversation), the maintainers of *the project where the issue was opened* (since different projects in the organization may have different maintainers) will decide whether the new project should be added. See the section above on voting if the maintainers cannot easily decide. ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner]. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: NOTICE ================================================ Envoy Copyright 2016-2019 Envoy Project Authors Licensed under Apache License 2.0. See LICENSE for terms. ================================================ FILE: OWNERS.md ================================================ * See [CONTRIBUTING.md](CONTRIBUTING.md) for general contribution guidelines. * See [GOVERNANCE.md](GOVERNANCE.md) for governance guidelines and maintainer responsibilities. This page lists all active maintainers and their areas of expertise. This can be used for routing PRs, questions, etc. to the right place. # Senior maintainers * Matt Klein ([mattklein123](https://github.com/mattklein123)) (mklein@lyft.com) * Catch-all, "all the things", and generally trying to make himself obsolete as fast as possible. * Harvey Tuch ([htuch](https://github.com/htuch)) (htuch@google.com) * APIs, xDS, UDPA, gRPC, configuration, security, Python, and Bash. * Alyssa Wilk ([alyssawilk](https://github.com/alyssawilk)) (alyssar@google.com) * HTTP, flow control, cluster manager, load balancing, and core networking (listeners, connections, etc.). * Stephan Zuercher ([zuercher](https://github.com/zuercher)) (zuercher@gmail.com) * Load balancing, upstream clusters and cluster manager, logging, complex HTTP routing (metadata, etc.), and macOS build. 
* Lizan Zhou ([lizan](https://github.com/lizan)) (lizan@tetrate.io) * gRPC, gRPC/JSON transcoding, and core networking (transport socket abstractions), Bazel, build issues, and CI in general. * Snow Pettersen ([snowp](https://github.com/snowp)) (aickck@gmail.com) * Upstream, host/priority sets, load balancing, and retry plugins. * Greg Greenway ([ggreenway](https://github.com/ggreenway)) (ggreenway@apple.com) * TLS, TCP proxy, listeners, and HTTP proxy/connection pooling. # Maintainers * Asra Ali ([asraa](https://github.com/asraa)) (asraa@google.com) * Fuzzing, security, headers, HTTP/gRPC, router, access log, tests. * Yan Avlasov ([yanavlasov](https://github.com/yanavlasov)) (yavlasov@google.com) * Data plane, codecs, security, configuration. * Jose Nino ([junr03](https://github.com/junr03)) (jnino@lyft.com) * Outlier detection, HTTP routing, xDS, configuration/operational questions. * Dhi Aurrahman ([dio](https://github.com/dio)) (dio@tetrate.io) * Lua, access logging, and general miscellany. * Joshua Marantz ([jmarantz](https://github.com/jmarantz)) (jmarantz@google.com) * Stats, abseil, scalability, and performance. * Antonio Vicente ([antoniovicente](https://github.com/antoniovicente)) (avd@google.com) * Event management, security, performance, data plane. # Envoy security team * All maintainers * Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com) * Tony Allen ([tonya11en](https://github.com/tonya11en)) (tallen@lyft.com) # Emeritus maintainers * Constance Caramanolis ([ccaraman](https://github.com/ccaraman)) (ccaramanolis@lyft.com) * Roman Dzhabarov ([RomanDzhabarov](https://github.com/RomanDzhabarov)) (rdzhabarov@lyft.com) * Bill Gallagher ([wgallagher](https://github.com/wgallagher)) (bgallagher@lyft.com) * Dan Noé ([dnoe](https://github.com/dnoe)) (dpn@google.com) # Friends of Envoy This section lists a few people that are not maintainers but typically help out with subject matter expert reviews. 
Feel free to loop them in as needed. * Piotr Sikora ([PiotrSikora](https://github.com/PiotrSikora)) (piotrsikora@google.com) * TLS, BoringSSL, and core networking (listeners, connections, etc.). * Shriram Rajagopalan ([rshriram](https://github.com/rshriram)) (shriram@us.ibm.com) * Istio, APIs, HTTP routing, and WebSocket. * John Millikin ([jmillikin-stripe](https://github.com/jmillikin-stripe)) (jmillikin@stripe.com) * Bazel/build. * Daniel Hochman ([danielhochman](https://github.com/danielhochman)) (dhochman@lyft.com) * Redis, Python, configuration/operational questions. * Yuchen Dai ([lambdai](https://github.com/lambdai)) (lambdai@google.com) * v2 xDS, listeners, filter chain discovery service. ================================================ FILE: PULL_REQUESTS.md ================================================ When creating an Envoy pull request (PR) the text box will automatically be filled in with the basic fields from the [pull request template](PULL_REQUEST_TEMPLATE.md). The following is a more detailed explanation of what should go in each field. ### Title The title of the PR should be brief (one line) noting the subsystem or the aspect this PR applies to and explaining the overall change. Both the component and the explanation must be lower case. For example: * ci: update build image to 44d539cb * docs: fix indent, buffer: add copyOut() method * router: add x-envoy-overloaded header * tls: add support for specifying TLS session ticket keys ### Commit Message The commit message field should include an explanation of what this PR does. This will be used as the final commit message that maintainers will use to populate the commit message when merging. If this PR causes a change in behavior it should document the behavior before and after. If fixing a bug, please describe what the original issue is and how the change resolves it. If it is configuration controlled, it should note how the feature is enabled etc... 
### Additional Description The additional description field should include information of what this PR does that may be out of scope for a commit message. This could include additional information or context useful to reviewers. ### Risk Risk Level is one of: Low | Medium | High Low: Small bug fix or small optional feature. Medium: New features that are not enabled(for example: new filter). Small-medium features added to existing components(for example: modification to an existing filter). High: Complicated changes such as flow control, rewrites of critical components, etc. Note: The above is only a rough guide for choosing a level, please ask if you have any concerns about the risk of the PR. ### Testing The testing section should include an explanation of what testing was done, for example: unit test, integration, manual testing, etc. Note: It isn’t expected to do all forms of testing, please use your best judgement or ask for guidance if you are unsure. A good rule of thumb is the riskier the change, the more comprehensive the testing should be. ### Documentation If there are documentation changes, please include a brief description of what they are. Docs changes may be in [docs/root](docs/root) and/or inline with the API protos. Please write in N/A if there were no documentation changes. Any PRs with structural changes to the dataplane should also update the [Life of a Request](docs/root/intro/life_of_a_request.md) documentation as appropriate. ### Release notes If this change is user impacting OR extension developer impacting (filter API, etc.) you **must** add a release note to the [version history](docs/root/version_history/current.rst) for the current version. Please include any relevant links. Each release note should be prefixed with the relevant subsystem in **alphabetical order** (see existing examples as a guide) and include links to relevant parts of the documentation. Thank you! Please write in N/A if there are no release notes. 
### Runtime guard If this PR has a user-visible behavioral change, or otherwise falls under the guidelines for runtime guarding in the [contributing doc](CONTRIBUTING.md) it should have a runtime guard, which should be documented both in the release notes and here in the PR description. For new feature additions guarded by configs, no-op refactors, docs changes etc. this field can be disregarded and/or removed. ### Issues If this PR fixes an outstanding issue, please add a line of the form: Fixes #Issue This will result in the linked issue being automatically closed when the PR is merged. If you want to associate an issue with a PR without closing the issue, you may instead just tag the PR with the issue: \#Issue ### Deprecated If this PR deprecates existing Envoy APIs or code, it should include an update to the deprecated section of the [version history](docs/root/version_history/current.rst) and a one line note in the PR description. If you mark existing APIs or code as deprecated, when the next release is cut, the deprecation script will create and assign an issue to you for cleaning up the deprecated code path. ================================================ FILE: PULL_REQUEST_TEMPLATE.md ================================================ For an explanation of how to fill out the fields, please see the relevant section in [PULL_REQUESTS.md](https://github.com/envoyproxy/envoy/blob/master/PULL_REQUESTS.md) Commit Message: Additional Description: Risk Level: Testing: Docs Changes: Release Notes: [Optional Runtime guard:] [Optional Fixes #Issue] [Optional Deprecated:] ================================================ FILE: README.md ================================================ ![Envoy Logo](https://github.com/envoyproxy/artwork/blob/master/PNG/Envoy_Logo_Final_PANTONE.png) [Cloud-native high-performance edge/middle/service proxy](https://www.envoyproxy.io/) Envoy is hosted by the [Cloud Native Computing Foundation](https://cncf.io) (CNCF). 
If you are a company that wants to help shape the evolution of technologies that are container-packaged, dynamically-scheduled and microservices-oriented, consider joining the CNCF. For details about who's involved and how Envoy plays a role, read the CNCF [announcement](https://www.cncf.io/blog/2017/09/13/cncf-hosts-envoy/). [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1266/badge)](https://bestpractices.coreinfrastructure.org/projects/1266) [![Azure Pipelines](https://dev.azure.com/cncf/envoy/_apis/build/status/11?branchName=master)](https://dev.azure.com/cncf/envoy/_build/latest?definitionId=11&branchName=master) [![CircleCI](https://circleci.com/gh/envoyproxy/envoy/tree/master.svg?style=shield)](https://circleci.com/gh/envoyproxy/envoy/tree/master) [![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/envoy.svg)](https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:envoy) [![Jenkins](https://img.shields.io/jenkins/s/https/powerci.osuosl.org/job/build-envoy-master/badge/icon/.svg?label=ppc64le%20build)](http://powerci.osuosl.org/job/build-envoy-master/) ## Documentation * [Official documentation](https://www.envoyproxy.io/) * [FAQ](https://www.envoyproxy.io/docs/envoy/latest/faq/overview) * [Unofficial Chinese documentation](https://www.servicemesher.com/envoy/) * Watch [a video overview of Envoy](https://www.youtube.com/watch?v=RVZX4CwKhGE) ([transcript](https://www.microservices.com/talks/lyfts-envoy-monolith-service-mesh-matt-klein/)) to find out more about the origin story and design philosophy of Envoy * [Blog](https://medium.com/@mattklein123/envoy-threading-model-a8d44b922310) about the threading model * [Blog](https://medium.com/@mattklein123/envoy-hot-restart-1d16b14555b5) about hot restart * [Blog](https://medium.com/@mattklein123/envoy-stats-b65c7f363342) about stats architecture * [Blog](https://medium.com/@mattklein123/the-universal-data-plane-api-d15cec7a) about universal 
data plane API * [Blog](https://medium.com/@mattklein123/lyfts-envoy-dashboards-5c91738816b1) on Lyft's Envoy dashboards ## Related * [data-plane-api](https://github.com/envoyproxy/data-plane-api): v2 API definitions as a standalone repository. This is a read-only mirror of [api](api/). * [envoy-perf](https://github.com/envoyproxy/envoy-perf): Performance testing framework. * [envoy-filter-example](https://github.com/envoyproxy/envoy-filter-example): Example of how to add new filters and link to the main repository. ## Contact * [envoy-announce](https://groups.google.com/forum/#!forum/envoy-announce): Low frequency mailing list where we will email announcements only. * [envoy-security-announce](https://groups.google.com/forum/#!forum/envoy-security-announce): Low frequency mailing list where we will email security related announcements only. * [envoy-users](https://groups.google.com/forum/#!forum/envoy-users): General user discussion. * [envoy-dev](https://groups.google.com/forum/#!forum/envoy-dev): Envoy developer discussion (APIs, feature design, etc.). * [envoy-maintainers](https://groups.google.com/forum/#!forum/envoy-maintainers): Use this list to reach all core Envoy maintainers. * [Twitter](https://twitter.com/EnvoyProxy/): Follow along on Twitter! * [Slack](https://envoyproxy.slack.com/): Slack, to get invited go [here](https://envoyslack.cncf.io). We have the IRC/XMPP gateways enabled if you prefer either of those. Once an account is created, connection instructions for IRC/XMPP can be found [here](https://envoyproxy.slack.com/account/gateways). * NOTE: Response to user questions is best effort on Slack. For a "guaranteed" response please email envoy-users@ per the guidance in the following linked thread. Please see [this](https://groups.google.com/forum/#!topic/envoy-announce/l9zjYsnS3TY) email thread for information on email list usage. 
## Contributing Contributing to Envoy is fun and modern C++ is a lot less scary than you might think if you don't have prior experience. To get started: * [Contributing guide](CONTRIBUTING.md) * [Beginner issues](https://github.com/envoyproxy/envoy/issues?q=is%3Aopen+is%3Aissue+label%3Abeginner) * [Build/test quick start using docker](ci#building-and-running-tests-as-a-developer) * [Developer guide](DEVELOPER.md) * Consider installing the Envoy [development support toolchain](https://github.com/envoyproxy/envoy/blob/master/support/README.md), which helps automate parts of the development process, particularly those involving code review. * Please make sure that you let us know if you are working on an issue so we don't duplicate work! ## Community Meeting The Envoy team meets twice per month on Tuesday, alternating between 9am PT and 5PM PT. The public Google calendar is here: https://goo.gl/PkDijT * Meeting minutes are [here](https://goo.gl/5Cergb) * Recorded videos are posted [here](https://www.youtube.com/channel/UCvqbFHwN-nwalWPjPUKpvTA/videos?view=0&sort=dd&shelf_id=1) ## Security ### Security Audit A third party security audit was performed by Cure53, you can see the full report [here](docs/SECURITY_AUDIT.pdf). ### Reporting security vulnerabilities If you've found a vulnerability or a potential vulnerability in Envoy please let us know at [envoy-security](mailto:envoy-security@googlegroups.com). We'll send a confirmation email to acknowledge your report, and we'll send an additional email when we've identified the issue positively or negatively. For further details please see our complete [security release process](SECURITY.md). ================================================ FILE: RELEASES.md ================================================ # Release Process ## Active development Active development is happening on the `master` branch, and a new version is released from it at the end of each quarter. 
## Stable releases Stable releases of Envoy include: * Extended maintenance window (any version released in the last 12 months). * Security fixes backported from the `master` branch (including those deemed not worthy of creating a CVE). * Stability fixes backported from the `master` branch (anything that can result in a crash, including crashes triggered by a trusted control plane). * Bugfixes, deemed worthwhile by the maintainers of stable releases. ### Hand-off Hand-off to the maintainers of stable releases happens after Envoy maintainers release a new version from the `master` branch by creating a `vX.Y.0` tag and a corresponding `release/vX.Y` branch, with merge permissions given to the release manager of stable releases, and CI configured to execute tests on it. ### Security releases Critical security fixes are owned by the Envoy security team, which provides fixes for the `master` branch, and the latest release branch. Once those fixes are ready, the maintainers of stable releases backport them to the remaining supported stable releases. ### Backports All other security and reliability fixes can be nominated for backporting to stable releases by Envoy maintainers, Envoy security team, the change author, or members of the Envoy community by adding the `backport/review` or `backport/approved` label (this can be done using [repokitteh]'s `/backport` command). Changes nominated by the change author and/or members of the Envoy community are evaluated for backporting on a case-by-case basis, and require approval from either the release manager of stable release, Envoy maintainers, or Envoy security team. Once approved, those fixes are backported from the `master` branch to all supported stable branches by the maintainers of stable releases. New stable versions from non-critical security fixes are released on a regular schedule, initially aiming for the bi-weekly releases. 
### Release management Release managers of stable releases are responsible for approving and merging backports, tagging stable releases and sending announcements about them. This role is rotating on a quarterly basis. | Quarter | Release manager | |:-------:|:----------------------------:| | 2020 Q1 | Piotr Sikora ([PiotrSikora]) | | 2020 Q2 | Piotr Sikora ([PiotrSikora]) | | 2020 Q3 | Yuchen Dai ([lambdai]) | ## Release schedule In order to accommodate downstream projects, new Envoy releases are produced on a fixed release schedule (at the end of each quarter), with an acceptable delay of up to 2 weeks, with a hard deadline of 3 weeks. | Version | Expected | Actual | Difference | End of Life | |:-------:|:----------:|:----------:|:----------:|:-----------:| | 1.12.0 | 2019/09/30 | 2019/10/31 | +31 days | 2020/10/31 | | 1.13.0 | 2019/12/31 | 2020/01/20 | +20 days | 2021/01/20 | | 1.14.0 | 2020/03/31 | 2020/04/08 | +8 days | 2021/04/08 | | 1.15.0 | 2020/06/30 | 2020/07/07 | +7 days | 2021/07/07 | | 1.16.0 | 2020/09/30 | 2020/10/08 | +8 days | 2021/10/08 | | 1.17.0 | 2020/12/31 | | | | [repokitteh]: https://github.com/repokitteh [PiotrSikora]: https://github.com/PiotrSikora ================================================ FILE: REPO_LAYOUT.md ================================================ # Repository layout overview This is a high level overview of how the repository is laid out to both aid in code investigation, as well as to clearly specify how extensions are added to the repository. The top level directories are: * [.circleci/](.circleci/): Configuration for [CircleCI](https://circleci.com/gh/envoyproxy). * [api/](api/): Envoy data plane API. * [bazel/](bazel/): Configuration for Envoy's use of [Bazel](https://bazel.build/). * [ci/](ci/): Scripts used both during CI as well as to build Docker containers. * [configs/](configs/): Example Envoy configurations. 
* [docs/](docs/): End user facing Envoy proxy and data plane API documentation as well as scripts for publishing final docs during releases. * [examples/](examples/): Larger Envoy examples using Docker and Docker Compose. * [include/](include/): "Public" interface headers for "core" Envoy. In general, these are almost entirely 100% abstract classes. There are a few cases of not-abstract classes in the "public" headers, typically for performance reasons. Note that "core" includes some "extensions" such as the HTTP connection manager filter and associated functionality which are so fundamental to Envoy that they will likely never be optional from a compilation perspective. * [restarter/](restarter/): Envoy's hot restart wrapper Python script. * [source/](source/): Source code for core Envoy as well as extensions. The layout of this directory is discussed in further detail below. * [support/](support/): Development support scripts (pre-commit Git hooks, etc.) * [test/](test/): Test code for core Envoy as well as extensions. The layout of this directory is discussed in further detail below. * [tools/](tools/): Miscellaneous tools that have not found a home somewhere else. ## [source/](source/) * [common/](source/common/): Core Envoy code (not specific to extensions) that is also not specific to a standalone server implementation. I.e., this is the code that could be used if Envoy were eventually embedded as a library. * [docs/](source/docs/): Miscellaneous developer/design documentation that is not relevant for the public user documentation. * [exe/](source/exe/): Code specific to building the final production Envoy server binary. This is the only code that is not shared by integration and unit tests. * [extensions/](source/extensions/): Extensions to the core Envoy code. The layout of this directory is discussed in further detail below. * [server/](source/server/): Code specific to running Envoy as a standalone server. E.g., configuration, server startup, workers, etc. 
Over time, the line between `common/` and `server/` has become somewhat blurred. Use best judgment as to where to place something. ## [test/](test/) Not every directory within test is described below, but a few highlights: * Unit tests are found in directories matching their [source/](source/) equivalents. E.g., [common/](test/common/), [exe/](test/exe/), and [server/](test/server/). * Extension unit tests also match their source equivalents in [extensions/](test/extensions/). * [integration/](test/integration/) holds end-to-end integration tests using roughly the real Envoy server code, fake downstream clients, and fake upstream servers. Integration tests also test some of the extensions found in the repository. Note that in the future, we would like to allow integration tests that are specific to extensions and are not required for covering "core" Envoy functionality. Those integration tests will likely end up in the [extensions/](test/extensions/) directory but further work and thinking is required before we get to that point. * [mocks/](test/mocks/) contains mock implementations of all of the core Envoy interfaces found in [include/](include/). * Other directories include tooling used for configuration testing, coverage testing, fuzz testing, common test code, etc. ## [source/extensions](source/extensions/) layout We maintain a very specific code and namespace layout for extensions. This aids in discovering code/extensions, and also will allow us in the future to more easily scale out our extension maintainers by having OWNERS files specific to certain extensions. (As of this writing, this is not currently implemented but that is the plan moving forward.) * All extensions are either registered in [all_extensions.bzl](source/extensions/all_extensions.bzl) or [extensions_build_config.bzl](source/extensions/extensions_build_config.bzl). The former is for extensions that cannot be removed from the primary Envoy build. 
The latter is for extensions that can be removed on a site specific basis. See [bazel/README.md](bazel/README.md) for how to compile out extensions on a site specific basis. Note that by default extensions should be removable from the build unless there is a very good reason. * These are the top level extension directories and associated namespaces: * [access_loggers/](/source/extensions/access_loggers): Access log implementations which use the `Envoy::Extensions::AccessLoggers` namespace. * [filters/http/](/source/extensions/filters/http): HTTP L7 filters which use the `Envoy::Extensions::HttpFilters` namespace. * [filters/listener/](/source/extensions/filters/listener): Listener filters which use the `Envoy::Extensions::ListenerFilters` namespace. * [filters/network/](/source/extensions/filters/network): L4 network filters which use the `Envoy::Extensions::NetworkFilters` namespace. * [grpc_credentials/](/source/extensions/grpc_credentials): Custom gRPC credentials which use the `Envoy::Extensions::GrpcCredentials` namespace. * [health_checker/](/source/extensions/health_checker): Custom health checkers which use the `Envoy::Extensions::HealthCheckers` namespace. * [resolvers/](/source/extensions/resolvers): Network address resolvers which use the `Envoy::Extensions::Resolvers` namespace. * [stat_sinks/](/source/extensions/stat_sinks): Stat sink implementations which use the `Envoy::Extensions::StatSinks` namespace. * [tracers/](/source/extensions/tracers): Tracers which use the `Envoy::Extensions::Tracers` namespace. * [transport_sockets/](/source/extensions/transport_sockets): Transport socket implementations which use the `Envoy::Extensions::TransportSockets` namespace. * Each extension is contained wholly in its own namespace. E.g., `Envoy::Extensions::NetworkFilters::Echo`. * Common code that is used by multiple extensions should be in a `common/` directory as close to the extensions as possible. 
E.g., [filters/common/](/source/extensions/filters/common) for common code that is used by both HTTP and network filters. Common code used only by two HTTP filters would be found in `filters/http/common/`. Common code should be placed in a common namespace. E.g., `Envoy::Extensions::Filters::Common`. ================================================ FILE: SECURITY.md ================================================ # Security Reporting Process Please report any security issue or Envoy crash report to envoy-security@googlegroups.com where the issue will be triaged appropriately. Thank you in advance for helping to keep Envoy secure. # Security Release Process Envoy is a large growing community of volunteers, users, and vendors. The Envoy community has adopted this security disclosure and response policy to ensure we responsibly handle critical issues. ## Product Security Team (PST) Security vulnerabilities should be handled quickly and sometimes privately. The primary goal of this process is to reduce the total time users are vulnerable to publicly known exploits. The Product Security Team (PST) is responsible for organizing the entire response including internal communication and external disclosure but will need help from relevant developers to successfully run this process. The initial Product Security Team will consist of all [maintainers](OWNERS.md) in the private [envoy-security](https://groups.google.com/forum/#!forum/envoy-security) list. In the future we may decide to have a subset of maintainers work on security response given that this process is time consuming. ## Disclosures ### Private Disclosure Processes The Envoy community asks that all suspected vulnerabilities be privately and responsibly disclosed via the [reporting policy](README.md#reporting-security-vulnerabilities). 
### Public Disclosure Processes If you know of a publicly disclosed security vulnerability please IMMEDIATELY email [envoy-security](https://groups.google.com/forum/#!forum/envoy-security) to inform the Product Security Team (PST) about the vulnerability so they may start the patch, release, and communication process. If possible the PST will ask the person making the public report if the issue can be handled via a private disclosure process (for example if the full exploit details have not yet been published). If the reporter denies the request for private disclosure, the PST will move swiftly with the fix and release process. In extreme cases GitHub can be asked to delete the issue but this generally isn't necessary and is unlikely to make a public disclosure less damaging. ## Patch, Release, and Public Communication For each vulnerability a member of the PST will volunteer to lead coordination with the "Fix Team" and is responsible for sending disclosure emails to the rest of the community. This lead will be referred to as the "Fix Lead." The role of Fix Lead should rotate round-robin across the PST. Note that given the current size of the Envoy community it is likely that the PST is the same as the "Fix team." (I.e., all maintainers). The PST may decide to bring in additional contributors for added expertise depending on the area of the code that contains the vulnerability. All of the timelines below are suggestions and assume a private disclosure. The Fix Lead drives the schedule using their best judgment based on severity and development time. If the Fix Lead is dealing with a public disclosure all timelines become ASAP (assuming the vulnerability has a CVSS score >= 4; see below). If the fix relies on another upstream project's disclosure timeline, that will adjust the process as well. We will work with the upstream project to fit their timeline and best protect our users. 
### Released versions and master branch If the vulnerability affects the last point release version, e.g. 1.10, then the full security release process described in this document will be activated. A security point release will be created for 1.10, e.g. 1.10.1, together with a fix to master if necessary. Older point releases, e.g. 1.9, are not supported by the Envoy project and will not have any security release created. If a security vulnerability affects only these older versions but not master or the last supported point release, the Envoy security team will share this information with the private distributor list, following the standard embargo process, but not create a security release. After the embargo expires, the vulnerability will be described as a GitHub issue. A CVE will be filed if warranted by severity. If a vulnerability does not affect any point release but only master, additional caveats apply: * If the issue is detected and a fix is available within 7 days of the introduction of the vulnerability, or the issue is deemed a low severity vulnerability by the Envoy maintainer and security teams, the fix will be publicly reviewed and landed on master. If the severity is at least medium or at maintainer discretion a courtesy e-mail will be sent to envoy-users@googlegroups.com, envoy-dev@googlegroups.com, envoy-security-announce@googlegroups.com and cncf-envoy-distributors-announce@lists.cncf.io. * If the vulnerability has been in existence for more than 7 days and is medium or higher, we will activate the security release process. We advise distributors and operators working from the master branch to allow at least 5 days soak time after cutting a binary release before distribution or rollout, to allow time for our fuzzers to detect issues during their execution on ClusterFuzz. A soak period of 7 days provides an even stronger guarantee, since we will invoke the security release process for medium or higher severity issues for these older bugs. 
### Threat model See https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/threat_model. Vulnerabilities are evaluated against this threat model when deciding whether to activate the Envoy security release process. ### Fix Team Organization These steps should be completed within the first 24 hours of disclosure. - The Fix Lead will work quickly to identify relevant engineers from the affected projects and packages and CC those engineers into the disclosure thread. These selected developers are the Fix Team. - The Fix Lead will get the Fix Team access to private security repos to develop the fix. ### Fix Development Process These steps should be completed within the 1-7 days of Disclosure. - The Fix Lead and the Fix Team will create a [CVSS](https://www.first.org/cvss/specification-document) using the [CVSS Calculator](https://www.first.org/cvss/calculator/3.0). The Fix Lead makes the final call on the calculated CVSS; it is better to move quickly than making the CVSS perfect. - The Fix Team will notify the Fix Lead that work on the fix branch is complete once there are LGTMs on all commits in the private repo from one or more maintainers. If the CVSS score is under 4.0 ([a low severity score](https://www.first.org/cvss/specification-document#i5)) the Fix Team can decide to slow the release process down in the face of holidays, developer bandwidth, etc. These decisions must be discussed on the envoy-security mailing list. A three week window will be provided to members of the private distributor list from candidate patch availability until the security release date. It is expected that distributors will normally be able to perform a release within this time window. If there are exceptional circumstances, the Envoy security team will raise this window to four weeks. The release window will be reduced if the security issue is public or embargo is broken. 
We will endeavor not to overlap this three week window with or place it adjacent to major corporate holiday periods or end-of-quarter (e.g. impacting downstream Istio releases), where possible. ### Fix and disclosure SLOs * All reports to envoy-security@googlegroups.com will be triaged and have an initial response within 1 business day. * Privately disclosed issues will be fixed or publicly disclosed within 90 days by the Envoy security team. In exceptional circumstances we reserve the right to work with the discloser to coordinate on an extension, but this will be rarely used. * Any issue discovered by the Envoy security team and raised in our private bug tracker will be converted to a public issue within 90 days. We will regularly audit these issues to ensure that no major vulnerability (from the perspective of the threat model) is accidentally leaked. * Fuzz bugs are subject to a 90 day disclosure deadline. * Three weeks notice will be provided to private distributors from patch availability until the embargo deadline. * Public zero days will be fixed ASAP, but there is no SLO for this, since this will depend on the severity and impact to the organizations backing the Envoy security team. ### Fix Disclosure Process With the fix development underway, the Fix Lead needs to come up with an overall communication plan for the wider community. This Disclosure process should begin after the Fix Team has developed a Fix or mitigation so that a realistic timeline can be communicated to users. **Disclosure of Forthcoming Fix to Users** (Completed within 1-7 days of Disclosure) - The Fix Lead will email [envoy-security-announce@googlegroups.com](https://groups.google.com/forum/#!forum/envoy-security-announce) (CC [envoy-announce@googlegroups.com](https://groups.google.com/forum/#!forum/envoy-announce)) informing users that a security vulnerability has been disclosed and that a fix will be made available at YYYY-MM-DD HH:MM UTC in the future via this list. 
This time is the Release Date. - The Fix Lead will include any mitigating steps users can take until a fix is available. The communication to users should be actionable. They should know when to block time to apply patches, understand exact mitigation steps, etc. **Optional Fix Disclosure to Private Distributors List** (Completed within 1-14 days of Disclosure): - The Fix Lead will make a determination with the help of the Fix Team if an issue is critical enough to require early disclosure to distributors. Generally this Private Distributor Disclosure process should be reserved for remotely exploitable or privilege escalation issues. Otherwise, this process can be skipped. - The Fix Lead will email the patches to cncf-envoy-distributors-announce@lists.cncf.io so distributors can prepare builds to be available to users on the day of the issue's announcement. Any patches against master will be updated and resent weekly. Distributors should read about the [Private Distributors List](#private-distributors-list) to find out the requirements for being added to this list. - **What if a vendor breaks embargo?** The PST will assess the damage. The Fix Lead will make the call to release earlier or continue with the plan. When in doubt push forward and go public ASAP. **Fix Release Day** (Completed within 1-21 days of Disclosure) - The maintainers will create a new patch release branch from the latest patch release tag + the fix from the security branch. As a practical example if v1.5.3 is the latest patch release in Envoy.git a new branch will be created called v1.5.4 which includes only patches required to fix the issue. - The Fix Lead will cherry-pick the patches onto the master branch and all relevant release branches. The Fix Team will LGTM and merge. Maintainers will merge these PRs as quickly as possible. 
Changes shouldn't be made to the commits even for a typo in the CHANGELOG as this will change the git sha of the commits leading to confusion and potentially conflicts as the fix is cherry-picked around branches. - The Fix Lead will request a CVE from [DWF](https://github.com/distributedweaknessfiling/DWF-Documentation) and include the CVSS and release details. - The Fix Lead will email envoy-{dev,users,announce}@googlegroups.com now that everything is public announcing the new releases, the CVE number, and the relevant merged PRs to get wide distribution and user action. As much as possible this email should be actionable and include links on how to apply the fix to user's environments; this can include links to external distributor documentation. - The Fix Lead will remove the Fix Team from the private security repo. ### Retrospective These steps should be completed 1-3 days after the Release Date. The retrospective process [should be blameless](https://landing.google.com/sre/book/chapters/postmortem-culture.html). - The Fix Lead will send a retrospective of the process to envoy-dev@googlegroups.com including details on everyone involved, the timeline of the process, links to relevant PRs that introduced the issue, if relevant, and any critiques of the response and release process. - Maintainers and Fix Team are also encouraged to send their own feedback on the process to envoy-dev@googlegroups.com. Honest critique is the only way we are going to get good at this as a community. ## Private Distributors List This list is intended to be used primarily to provide actionable information to multiple distribution vendors as well as a *limited* set of high impact end users at once. *This list is not intended in the general case for end users to find out about security issues*. 
### Embargo Policy The information members receive on cncf-envoy-distributors-announce must not be made public, shared, nor even hinted at anywhere beyond the need-to-know within your specific team except with the list's explicit approval. This holds true until the public disclosure date/time that was agreed upon by the list. Members of the list and others may not use the information for anything other than getting the issue fixed for your respective users. Before any information from the list is shared with respective members of your team required to fix said issue, they must agree to the same terms and only find out information on a need-to-know basis. We typically expect a single point-of-contact (PoC) at any given legal entity. Within the organization, it is the responsibility of the PoC to share CVE and related patches internally. This should be performed on a strictly need-to-know basis with affected groups to the extent that this is technically plausible. All teams should be aware of the embargo conditions and accept them. Ultimately, if an organization breaks embargo transitively through such sharing, they will lose the early disclosure privilege, so it's in their best interest to carefully share information internally, following best practices and use their judgement in balancing the tradeoff between protecting users and maintaining confidentiality. The embargo applies to information shared, source code and binary images. **It is a violation of the embargo policy to share binary distributions of the security fixes before the public release date.** This includes, but is not limited to, Envoy binaries and Docker images. It is expected that distributors have a method to stage and validate new binaries without exposing them publicly. 
If the information shared is under embargo from a third party, where Envoy is one of many projects that a disclosure is shared with, it is critical to consider that the ramifications of any leak will extend beyond the Envoy community and will leave us in a position in which we will be less likely to receive embargoed reports in the future. In the unfortunate event you share the information beyond what is allowed by this policy, you _must_ urgently inform the envoy-security@googlegroups.com mailing list of exactly what information leaked and to whom. A retrospective will take place after the leak so we can assess how to prevent making the same mistake in the future. If you continue to leak information and break the policy outlined here, you will be removed from the list. ### Contributing Back This is a team effort. As a member of the list you must carry some water. This could be in the form of the following: **Technical** - Review and/or test the proposed patches and point out potential issues with them (such as incomplete fixes for the originally reported issues, additional issues you might notice, and newly introduced bugs), and inform the list of the work done even if no issues were encountered. **Administrative** - Help draft emails to the public disclosure mailing list. - Help with release notes. ### Membership Criteria To be eligible for the cncf-envoy-distributors-announce mailing list, your use of Envoy should: 1. Be either: 1. An actively maintained distribution of Envoy components. An example is "SuperAwesomeLinuxDistro" which offers Envoy pre-built packages. Another example is "SuperAwesomeServiceMesh" which offers a service mesh product that includes Envoy as a component. OR 2. Offer Envoy as a publicly available infrastructure or platform service, in which the product clearly states (e.g. public documentation, blog posts, marketing copy, etc.) that it is built on top of Envoy. E.g., "SuperAwesomeCloudProvider's Envoy as a Service (EaaS)". 
An infrastructure service that uses Envoy for a product but does not publicly say they are using Envoy does not *generally* qualify (see option 3 that follows). This is essentially IaaS or PaaS. If you use Envoy to support a SaaS, e.g. "SuperAwesomeCatVideoService", this does not *generally* qualify. OR 3. An end user of Envoy that satisfies the following requirements: 1. Is "well known" to the Envoy community. Being "well known" is fully subjective and determined by the Envoy maintainers and security team. Becoming "well known" would generally be achieved by activities such as: PR contributions, either code or documentation; helping other end users on Slack, GitHub, and the mailing lists; speaking about use of Envoy at conferences; writing about use of Envoy in blog posts; sponsoring Envoy conferences, meetups, and other activities; etc. This is a more strict variant of item 5 below. 2. Is of sufficient size, scale, and impact to make your inclusion on the list worthwhile. The definition of size, scale, and impact is fully subjective and determined by the Envoy maintainers and security team. The definition will not be discussed further in this document. 3. You *must* smoke test and then widely deploy security patches promptly and report back success or failure ASAP. Furthermore, the Envoy maintainers may occasionally ask you to smoke test especially risky public PRs before they are merged. Not performing these tasks in a reasonably prompt timeframe will result in removal from the list. This is a more strict variant of item 7 below. 4. In order to balance inclusion in the list versus a greater chance of accidental disclosure, end users added to the list via this option will be limited to a total of **10** slots. Periodic review (see below) may allow new slots to open, so please continue to apply if it seems your organization would otherwise qualify. The security team also reserves the right to change this limit in the future. 2. 
Have a user or customer base not limited to your own organization (except for option 3 above). We will use the size of the user or customer base as part of the criteria to determine eligibility. 3. Have a publicly verifiable track record up to present day of fixing security issues. 4. Not be a downstream or rebuild of another distribution. 5. Be a participant and active contributor in the community. 6. Accept the [Embargo Policy](#embargo-policy) that is outlined above. You must have a way to privately stage and validate your updates that does not violate the embargo. 7. Be willing to [contribute back](#contributing-back) as outlined above. 8. Be able to perform a security release of your product within a three week window from candidate fix patch availability. 9. Have someone already on the list vouch for the person requesting membership on behalf of your distribution. 10. Nominate an e-mail alias or list for your organization to receive updates. This should not be an individual user address, but instead a list that can be maintained by your organization as individuals come and go. A good example is envoy-security@seven.com, a bad example is acidburn@seven.com. You must accept the invite sent to this address or you will not receive any e-mail updates. This e-mail address will be [shared with the Envoy community](#Members). Note that Envoy maintainers are members of the Envoy security team. [Members of the Envoy security team](OWNERS.md#envoy-security-team) and the organizations that they represent are implicitly included in the private distributor list. These organizations do not need to meet the above list of criteria with the exception of the acceptance of the embargo policy. ### Requesting to Join New membership requests are sent to envoy-security@googlegroups.com. In the body of your request please specify how you qualify and fulfill each criterion listed in [Membership Criteria](#membership-criteria). 
Here is a pseudo example: ``` To: envoy-security@googlegroups.com Subject: Seven-Corp Membership to cncf-envoy-distributors-announce Below are each criterion and why I think we, Seven-Corp, qualify. > 1. Be an actively maintained distribution of Envoy components OR offer Envoy as a publicly available service in which the product clearly states that it is built on top of Envoy OR be a well known end user of sufficient size, scale, and impact to make your inclusion worthwhile. We distribute the "Seven" distribution of Envoy [link]. We have been doing this since 1999 before proxies were even cool. OR We use Envoy for our #1 rated cat video service and have 40 billion MAU, proxying 40 trillion^2 RPS through Envoy at the edge. Secure cat videos are our top priority. We also contribute a lot to the Envoy community by implementing features, not making Matt ask for documentation or tests, and writing blog posts about efficient Envoy cat video serving. > 2. Have a user or customer base not limited to your own organization. Please specify an > approximate size of your user or customer base, including the number of > production deployments. Our user base spans the extensive "Seven" community. We have a slack and GitHub repos and mailing lists where the community hangs out. We have ~2000 customers, of which approximately 400 are using Seven in production. [links] > 3. Have a publicly verifiable track record up to present day of fixing security issues. We announce on our blog all upstream patches we apply to "Seven." [link to blog posts] > 4. Not be a downstream or rebuild of another distribution. If you offer Envoy as a publicly > available infrastructure or platform service, this condition does not need to apply. This does not apply, "Seven" is a unique snowflake distribution. > 5. Be a participant and active contributor in the community. Our members, Acidburn, Cereal, and ZeroCool are outstanding members and are well known throughout the Envoy community. 
Especially for their contributions in hacking the Gibson. > 6. Accept the Embargo Policy that is outlined above. You must have a way to privately stage and validate your updates that does not violate the embargo. We accept. > 7. Be willing to contribute back as outlined above. We are definitely willing to help! > 8. Be able to perform a security release of your product within a three week window from candidate fix patch availability. We affirm we can spin out new security releases within a 2 week window. > 9. Have someone already on the list vouch for the person requesting membership > on behalf of your distribution. CrashOverride will vouch for the "Seven" distribution joining the distribution list. > 10. Nominate an e-mail alias or list for your organization to receive updates. This should not be an individual user address, but instead a list that can be maintained by your organization as individuals come and go. A good example is envoy-security@seven.com, a bad example is acidburn@seven.com. You must accept the invite sent to this address or you will not receive any e-mail updates. This e-mail address will be shared with the Envoy community. envoy-security@seven.com ``` ### Review of membership criteria In all cases, members of the distribution list will be reviewed on a yearly basis by the maintainers and security team to ensure they still qualify for inclusion on the list. 
### Members | E-mail | Organization | End User | Last Review | |-------------------------------------------------------|:-------------:|:--------:|:-----------:| | envoy-security-team@aspenmesh.io | Aspen Mesh | No | 12/19 | | aws-app-mesh-security@amazon.com | AWS | No | 12/19 | | security@cilium.io | Cilium | No | 12/19 | | vulnerabilityreports@cloudfoundry.org | Cloud Foundry | No | 12/19 | | secalert@datawire.io | Datawire | No | 12/19 | | google-internal-envoy-security@google.com | Google | No | 12/19 | | argoprod@us.ibm.com | IBM | No | 12/19 | | istio-security-vulnerability-reports@googlegroups.com | Istio | No | 12/19 | | secalert@redhat.com | Red Hat | No | 12/19 | | envoy-security@solo.io | solo.io | No | 12/19 | | envoy-security@tetrate.io | Tetrate | No | 12/19 | | security@vmware.com | VMware | No | 12/19 | | envoy-security@pinterest.com | Pinterest | Yes | 12/19 | | envoy-security@dropbox.com | Dropbox | Yes | 01/20 | | envoy-security-predisclosure@stripe.com | Stripe | Yes | 01/20 | ================================================ FILE: STYLE.md ================================================ # C++ coding style * The Envoy source code is formatted using clang-format. Thus all white spaces, etc. issues are taken care of automatically. The CircleCI tests will automatically check the code format and fail. There are make targets that can both check the format (check_format) as well as fix the code format for you (fix_format). Errors in .clang-tidy are enforced while other warnings are suggestions. Note that code and comment blocks designated `clang-format off` must be closed with `clang-format on`. To run these checks locally, see [Support Tools](support/README.md). * Beyond code formatting, for the most part Envoy uses the [Google C++ style guidelines](https://google.github.io/styleguide/cppguide.html). The following section covers the major areas where we deviate from the Google guidelines. 
# Repository file layout * Please see [REPO_LAYOUT.md](REPO_LAYOUT.md). # Documentation * If you are modifying the data plane structurally, please keep the [Life of a Request](docs/root/intro/life_of_a_request.md) documentation up-to-date. # Deviations from Google C++ style guidelines * Exceptions are allowed and encouraged where appropriate. When using exceptions, do not add additional error handling that cannot possibly happen in the case an exception is thrown. * Do use exceptions for: - Configuration ingestion error handling. Invalid configurations (dynamic and static) should throw meaningful `EnvoyException`s, the configuration ingestion code will catch these. - Constructor failure. - Error handling in deep call stacks, where exceptions provide material improvements to code complexity and readability. * Apply caution when using exceptions on the data path for general purpose error handling. Exceptions are not caught on the data path and they should not be used for simple error handling, e.g. with shallow call stacks, where explicit error handling provides a more readable and easier to reason about implementation. * References are always preferred over pointers when the reference cannot be null. This includes both const and non-const references. * Function names should all use camel case starting with a lower case letter (e.g., `doFoo()`). * Struct/Class member variables have a `_` postfix (e.g., `int foo_;`). * Enum values using PascalCase (e.g., `RoundRobin`). * 100 columns is the line limit. * Use your GitHub name in TODO comments, e.g. `TODO(foobar): blah`. * Smart pointers are type aliased: * `using FooPtr = std::unique_ptr<Foo>;` * `using BarSharedPtr = std::shared_ptr<Bar>;` * `using BlahConstSharedPtr = std::shared_ptr<const Blah>;` * Regular pointers (e.g. `int* foo`) should not be type aliased. 
* `absl::optional<std::reference_wrapper<T>>` is type aliased: * `using FooOptRef = absl::optional<std::reference_wrapper<Foo>>;` * `using FooOptConstRef = absl::optional<std::reference_wrapper<const Foo>>;` * If move semantics are intended, prefer specifying function arguments with `&&`. E.g., `void onHeaders(Http::HeaderMapPtr&& headers, ...)`. The rationale for this is that it forces the caller to specify `std::move(...)` or pass a temporary and makes the intention at the callsite clear. Otherwise, it's difficult to tell if a const reference is actually being passed to the called function. This is true even for `std::unique_ptr`. * Prefer `unique_ptr` over `shared_ptr` wherever possible. `unique_ptr` makes ownership in production code easier to reason about. Note that this creates some test oddities where production code requires a `unique_ptr` but the test must still have access to the memory the production code is using (mock or otherwise). In these cases it is acceptable to allocate raw memory in a test and return it to the production code with the expectation that the production code will hold it in a `unique_ptr` and free it. Envoy uses the factory pattern quite a bit for these cases. (Search the code for "factory"). * The Google C++ style guide points out that [non-PoD static and global variables are forbidden](https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables). This _includes_ types such as `std::string`. We encourage the use of the advice in the [C++ FAQ on the static initialization fiasco](https://isocpp.org/wiki/faq/ctors#static-init-order-on-first-use) for how to best handle this. * The Google C++ style guide points out that [constant vars should be named `kConstantVar`](https://google.github.io/styleguide/cppguide.html#Constant_Names). In the Envoy codebase we use `ConstantVar` or `CONSTANT_VAR`. If you pick `CONSTANT_VAR`, please be certain the name is globally significant to avoid potential conflicts with #defines, which are not namespace-scoped, and may appear in externally controlled header files. 
* API-level comments should follow normal Doxygen conventions. Use `@param` to describe parameters and `@return <return-type>` for return values. Internal comments for methods and member variables may be regular C++ `//` comments or Doxygen at developer discretion. Where possible, methods should have meaningful documentation on expected input and state preconditions. * Header guards should use `#pragma once`. * All code should be inside a top-level Envoy namespace. There are some exceptions such as `main()` functions. When code cannot be placed inside the Envoy namespace there should be a comment of the form `// NOLINT(namespace-envoy)` at the top of the file. * If a method that must be defined outside the `test` directory is intended to be called only from test code then it should have a name that ends in `ForTest()` such as `aMethodForTest()`. In most cases tests can and should be structured so this is not necessary. * Tests default to StrictMock so will fail if hitting unexpected warnings. Feel free to use NiceMock for mocks whose behavior is not the focus of a test. * [Thread annotations](https://github.com/abseil/abseil-cpp/blob/master/absl/base/thread_annotations.h), such as `GUARDED_BY`, should be used for shared state guarded by locks/mutexes. * Functions intended to be local to a cc file should be declared in an anonymous namespace, rather than using the 'static' keyword. Note that the [Google C++ style guide](https://google.github.io/styleguide/cppguide.html#Unnamed_Namespaces_and_Static_Variables) allows either, but in Envoy we prefer anonymous namespaces. * Braces are required for all control statements including single line if, while, etc. statements. * Don't use [mangled Protobuf enum names](https://developers.google.com/protocol-buffers/docs/reference/cpp-generated#enum). # Error handling A few general notes on our error handling philosophy: * All error code returns should be checked. 
* At a very high level, our philosophy is that errors should be handled gracefully when caused by: - Untrusted network traffic OR - Raised by the Envoy process environment and are *likely* to happen * Examples of likely environmental errors include any type of network error, disk IO error, bad data returned by an API call, bad data read from runtime files, etc. Errors in the Envoy environment that are *unlikely* to happen after process initialization, should lead to process death, under the assumption that the additional burden of defensive coding and testing is not an effective use of time for an error that should not happen given proper system setup. Examples of these types of errors include not being able to open the shared memory region, system calls that should not fail assuming correct parameters (which should be validated via tests), etc. Examples of system calls that should not fail when passed valid parameters include the kernel returning a valid `sockaddr` after a successful call to `accept()`, `pthread_create()`, `pthread_join()`, etc. * OOM events (both memory and FDs) are considered fatal crashing errors. An OOM error should never silently be ignored and should crash the process either via the C++ allocation error exception, an explicit `RELEASE_ASSERT` following a third party library call, or an obvious crash on a subsequent line via null pointer dereference. This rule is again based on the philosophy that the engineering costs of properly handling these cases are not worth it. Time is better spent designing proper system controls that shed load if resource usage becomes too high, etc. * The "less is more" error handling philosophy described in the previous two points is primarily based on the fact that restarts are designed to be fast, reliable and cheap. 
* Although we strongly recommend that any type of startup error leads to a fatal error, since this is almost always a result of faulty configuration which should be caught during a canary process, there may be cases in which we want some classes of startup errors to be non-fatal. For example, if a misconfigured option is not necessary for server operation. Although this is discouraged, we will discuss these on a case by case basis during code review (an example of this is the `--admin-address-path` option). **If degraded mode error handling is implemented, we require that there is complete test coverage for the degraded case.** Additionally, the user should be aware of the degraded state minimally via an error log of level warn or greater and via the increment of a stat. * If you do need to log a non-fatal warning or error, you can unit-test it with EXPECT_LOG_CONTAINS or EXPECT_NO_LOGS from [logging.h](test/test_common/logging.h). It's generally bad practice to test by depending on log messages unless the actual behavior being validated is logging. It's preferable to export statistics to enable consumption by external monitoring for any behavior that should be externally consumed or to introduce appropriate internal interfaces such as mocks for internal behavior. * The error handling philosophy described herein is based on the assumption that Envoy is deployed using industry best practices (primarily canary). Major and obvious errors should always be caught in canary. If a low rate error leads to periodic crash cycling when deployed to production, the error rate should allow for rollback without large customer impact. * Tip: If the thought of adding the extra test coverage, logging, and stats to handle an error and continue seems ridiculous because *"this should never happen"*, it's a very good indication that the appropriate behavior is to terminate the process and not handle the error. When in doubt, please discuss. 
* Per above it's acceptable to turn failures into crash semantics via `RELEASE_ASSERT(condition)` or `PANIC(message)` if there is no other sensible behavior, e.g. in OOM (memory/FD) scenarios. Only `RELEASE_ASSERT(condition)` should be used to validate conditions that might be imposed by the external environment. `ASSERT(condition)` should be used to document (and check in debug-only builds) program invariants. Use `ASSERT` liberally, but do not use it for things that will crash in an obvious way in a subsequent line. E.g., do not do `ASSERT(foo != nullptr); foo->doSomething();`. Note that there is a gray line between external environment failures and program invariant violations. For example, memory corruption due to a security issue (a bug, deliberate buffer overflow etc.) might manifest as a violation of program invariants or as a detectable condition in the external environment (e.g. some library returning a highly unexpected error code or buffer contents). Unfortunately no rule can cleanly cover when to use `RELEASE_ASSERT` vs. `ASSERT`. In general we view `ASSERT` as the common case and `RELEASE_ASSERT` as the uncommon case, but experience and judgment may dictate a particular approach depending on the situation. # Hermetic and deterministic tests Tests should be hermetic, i.e. have all dependencies explicitly captured and not depend on the local environment. In general, there should be no non-local network access. In addition: * Port numbers should not be hardcoded. Tests should bind to port zero and then discover the bound port when needed. This avoids flakes due to conflicting ports and allows tests to be executed concurrently by Bazel. See [`test/integration/integration_test.h`](test/integration/integration_test.h) and [`test/common/network/listener_impl_test.cc`](test/common/network/listener_impl_test.cc) for examples of tests that do this. 
* Paths should be constructed using: * The methods in [`TestEnvironment`](test/test_common/environment.h) for C++ tests. * With `${TEST_TMPDIR}` (for writable temporary space) or `${TEST_SRCDIR}` for read-only access to test inputs in shell tests. * With `{{ test_tmpdir }}`, `{{ test_rundir }}` and `{{ test_udsdir }}` respectively for JSON templates. `{{ test_udsdir }}` is provided for pathname based Unix Domain Sockets, which must fit within a 108 character limit on Linux, a property that might not hold for `{{ test_tmpdir }}`. Tests should be deterministic. They should not rely on randomness or details such as the current time. Instead, mocks such as [`MockRandomGenerator`](test/mocks/runtime/mocks.h) and [`Mock*TimeSource`](test/mocks/common.h) should be used. # Google style guides for other languages * [Python](https://google.github.io/styleguide/pyguide.html) * [Bash](https://google.github.io/styleguide/shell.xml) * [Bazel](https://bazel.build/versions/master/docs/skylark/build-style.html) ================================================ FILE: VERSION ================================================ 1.16.0 ================================================ FILE: WORKSPACE ================================================ workspace(name = "envoy") load("//bazel:api_binding.bzl", "envoy_api_binding") envoy_api_binding() load("//bazel:api_repositories.bzl", "envoy_api_dependencies") envoy_api_dependencies() load("//bazel:repositories.bzl", "envoy_dependencies") envoy_dependencies() load("//bazel:repositories_extra.bzl", "envoy_dependencies_extra") envoy_dependencies_extra() load("//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() ================================================ FILE: api/API_OVERVIEW.md ================================================ # Envoy v2 APIs for developers ## Goals This repository contains both the implemented and draft v2 JSON REST and gRPC [Envoy](https://github.com/envoyproxy/envoy/) APIs. 
Version 2 of the Envoy API evolves existing APIs and introduces new APIs to: * Allow for more advanced load balancing through load and resource utilization reporting to management servers. * Improve N^2 health check scalability issues by optionally offloading health checking to other Envoy instances. * Support Envoy deployment in edge, sidecar and middle proxy deployment models via changes to the listener model, CDS API, and EDS (formerly called SDS in v1) API. * Allow streaming updates from the management server on change, instead of polling APIs from Envoy. gRPC APIs will be supported alongside JSON REST APIs to provide for this. * Ensure all Envoy runtime configuration is dynamically discoverable via API calls, including listener configuration, certificates and runtime settings, which are today sourced from the filesystem. There will still remain a static bootstrap configuration file that will specify items unlikely to change during runtime, including the Envoy node identity, xDS management server addresses, administration interface and tracing configuration. * Revisit and where appropriate cleanup any v1 technical debt. ## Status See [here](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview.html#status) for the current status of the v2 APIs. See [here](CONTRIBUTING.md#api-changes) for the v2 API change process. ## Principles * [Proto3](https://developers.google.com/protocol-buffers/docs/proto3) will be used to specify the canonical API. This will provide directly the gRPC API and via gRPC-JSON transcoding the JSON REST API. A textual YAML input will be supported for filesystem configuration files (e.g. the bootstrap file), in addition to JSON, as a syntactic convenience. YAML file contents will be internally converted to JSON and then follow the standard JSON-proto3 conversion during Envoy config ingestion. * xDS APIs should support eventual consistency. 
For example, if RDS references a cluster that has not yet been supplied by CDS, it should be silently ignored and traffic not forwarded until the CDS update occurs. Stronger consistency guarantees are possible if the management server is able to sequence the xDS APIs carefully (for example by using the ADS API below). By following the `[CDS, EDS, LDS, RDS]` sequence for all pertinent resources, it will be possible to avoid traffic outages during configuration update. * The API is primarily intended for machine generation and consumption. It is expected that the management server is responsible for mapping higher level configuration concepts to API responses. Similarly, static configuration fragments may be generated by templating tools, etc. The APIs and tools used to generate xDS configuration are beyond the scope of the definitions in this repository. * REST-JSON API equivalents will be provided for the basic singleton xDS subscription services CDS/EDS/LDS/RDS/SDS. Advanced APIs such as HDS, ADS and EDS multi-dimensional LB will be gRPC only. This avoids having to map complicated bidirectional stream semantics onto REST. * Listeners will be immutable. Any updates to a listener via LDS will require the draining of existing connections for the specific bound IP/port. As a result, new requests will only be guaranteed to observe the new configuration after existing connections have drained or the drain timeout. * Versioning will be expressed via [proto3 package namespaces](https://developers.google.com/protocol-buffers/docs/proto3#packages), i.e. `package envoy.api.v2;`. * Custom components (e.g. filters, resolvers, loggers) will use a reverse DNS naming scheme, e.g. `com.google.widget`, `com.lyft.widget`. ## APIs Unless otherwise stated, the APIs with the same names as v1 APIs have a similar role. * [Cluster Discovery Service (CDS)](envoy/api/v2/cds.proto). * [Endpoint Discovery Service (EDS)](envoy/api/v2/eds.proto). 
This has the same role as SDS in the [v1 API](https://www.envoyproxy.io/docs/envoy/latest/api-v1/cluster_manager/sds), the new name better describes what the API does in practice. Advanced global load balancing capable of utilizing N-dimensional upstream metrics is now supported. * [Health Discovery Service (HDS)](envoy/service/discovery/v2/hds.proto). This new API supports efficient endpoint health discovery by the management server via the Envoy instances it manages. Individual Envoy instances will typically receive HDS instructions to health check a subset of all endpoints. The health check subset may not be a subset of the Envoy instance's EDS endpoints. * [Listener Discovery Service (LDS)](envoy/api/v2/lds.proto). This new API supports dynamic discovery of the listener configuration (which ports to bind to, TLS details, filter chains, etc.). * [Metric Service (MS)](envoy/service/metrics/v2/metrics_service.proto). This new API allows Envoy to push (stream) metrics forever for servers to consume. * [Rate Limit Service (RLS)](envoy/service/ratelimit/v2/rls.proto) * [Route Discovery Service (RDS)](envoy/api/v2/rds.proto). * [Secret Discovery Service (SDS)](envoy/service/discovery/v2/sds.proto). In addition to the above APIs, an aggregation API will be provided to allow for fine grained control over the sequencing of API updates across discovery services: * [Aggregated Discovery Service (ADS)](envoy/api/v2/discovery.proto). See the [ADS overview](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview#aggregated-discovery-service). A protocol description for the xDS APIs is provided [here](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol). ## Terminology Some relevant [existing terminology](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/terminology.html) is repeated below and some new v2 terms introduced. * Cluster: A cluster is a group of logically similar endpoints that Envoy connects to. 
In v2, RDS routes point to clusters, CDS provides cluster configuration and Envoy discovers the cluster members via EDS. * Downstream: A downstream host connects to Envoy, sends requests, and receives responses. * Endpoint: An endpoint is an upstream host that is a member of one or more clusters. Endpoints are discovered via EDS. * Listener: A listener is a named network location (e.g., port, unix domain socket, etc.) that can be connected to by downstream clients. Envoy exposes one or more listeners that downstream hosts connect to. * Locality: A location where an Envoy instance or an endpoint runs. This includes region, zone and sub-zone identification. * Management server: A logical server implementing the v2 Envoy APIs. This is not necessarily a single physical machine since it may be replicated/sharded and API serving for different xDS APIs may be implemented on different physical machines. * Region: Geographic region where a zone is located. * Sub-zone: Location within a zone where an Envoy instance or an endpoint runs. This allows for multiple load balancing targets within a zone. * Upstream: An upstream host receives connections and requests from Envoy and returns responses. * xDS: CDS/EDS/HDS/LDS/RLS/RDS/SDS APIs. * Zone: Availability Zone (AZ) in AWS, Zone in GCP. ================================================ FILE: api/API_VERSIONING.md ================================================ # API versioning guidelines The Envoy project (and in the future [UDPA](https://github.com/cncf/udpa)) takes API stability and versioning seriously. Providing stable APIs is a necessary step in ensuring API adoption and success of the ecosystem. Below we articulate the API versioning guidelines that aim to deliver this stability. # API semantic versioning The Envoy APIs consist of a family of packages, e.g. `envoy.admin.v2alpha`, `envoy.service.trace.v2`. 
Each package is independently versioned with a protobuf semantic versioning scheme based on https://cloud.google.com/apis/design/versioning. The major version for a package is captured in its name (and directory structure). E.g. version 2 of the tracing API package is named `envoy.service.trace.v2` and its constituent protos are located in `api/envoy/service/trace/v2`. Every protobuf must live directly in a versioned package namespace; we do not allow subpackages such as `envoy.service.trace.v2.somethingelse`. Minor and patch versions will be implemented in the future; this effort is tracked in https://github.com/envoyproxy/envoy/issues/8416. In everyday discussion and GitHub labels, we refer to the `v2`, `v3`, `vN`, `...` APIs. This has a specific technical meaning. Any given message in the Envoy API, e.g. the `Bootstrap` at `envoy.config.bootstrap.v3.Bootstrap`, will transitively reference a number of packages in the Envoy API. These may be at `vN`, `v(N-1)`, etc. The Envoy API is technically a DAG of versioned package namespaces. When we talk about the `vN xDS API`, we really refer to the `N` of the root configuration resources (e.g. bootstrap, xDS resources such as `Cluster`). The v3 API bootstrap configuration is `envoy.config.bootstrap.v3.Bootstrap`, even though it might transitively reference `envoy.service.trace.v2`. # Backwards compatibility In general, within a package's major API version, we do not allow any breaking changes. The guiding principle is that neither the wire format nor protobuf compiler generated language bindings should experience a backwards incompatible break on a change. Specifically: * Fields should not be renumbered or have their types changed. This is standard proto development procedure. * Renaming of fields or package namespaces for a proto must not occur. This is inherently dangerous, since: * Field renames break wire compatibility. 
This is stricter than standard proto development procedure in the sense that it does not break binary wire format. However, it **does** break loading of YAML/JSON into protos as well as text protos. Since we consider YAML/JSON to be first class inputs, we must not change field names. * For service definitions, the gRPC endpoint URL is inferred from package namespace, so this will break client/server communication. * For a message embedded in an `Any` object, the type URL, which the package namespace is a part of, may be used by Envoy or other API consuming code. Currently, this applies to the top-level resources embedded in `DiscoveryResponse` objects, e.g. `Cluster`, `Listener`, etc. * Consuming code will break and require source code changes to match the API changes. * Some other changes are considered breaking for Envoy APIs that are usually considered safe in terms of protobuf wire compatibility: * Upgrading a singleton field to a repeated, e.g. `uint32 foo = 1;` to `repeated uint32 foo = 1`. This changes the JSON wire representation and hence is considered a breaking change. * Wrapping an existing field with `oneof`. This has no protobuf or JSON/YAML wire implications, but is disruptive to various consuming stubs in languages such as Go, creating unnecessary churn. * Increasing the strictness of [protoc-gen-validate](https://github.com/envoyproxy/protoc-gen-validate) annotations. Exceptions may be granted for scenarios in which these stricter conditions model behavior already implied structurally or by documentation. The exception to the above policy is for API versions tagged `vNalpha`. Within an alpha major version, arbitrary breaking changes are allowed. Note that changes to default values for wrapped types, e.g. `google.protobuf.UInt32Value` are not governed by the above policy. Any management server requiring stability across Envoy API or implementations within a major version should set explicit values for these fields. 
# API lifecycle A new major version is a significant event in the xDS API ecosystem, inevitably requiring support from clients (Envoy, gRPC) and a large number of control planes, ranging from simple in-house custom management servers to xDS-as-a-service offerings run by vendors. The [xDS API shepherds](https://github.com/orgs/envoyproxy/teams/api-shepherds) will make the decision to add a new major version subject to the following constraints: * There exists sufficient technical debt in the xDS APIs in the existing supported major version to justify the cost burden for xDS client/server implementations. * At least one year has elapsed since the last major version was cut. * Consultation with the Envoy community (via Envoy community call, `#xds` channel on Slack), as well as gRPC OSS community (via reaching out to language maintainers) is made. This is not a veto process; the API shepherds retain the right to move forward with a new major API version after weighing this input with the first two considerations above. Following the release of a new major version, the API lifecycle follows a deprecation clock. Envoy will support at most three major versions of any API package at all times: * The current stable major version, e.g. v3. * The previous stable major version, e.g. v2. This is needed to ensure that we provide at least 1 year for a supported major version to sunset. By supporting two stable major versions simultaneously, this makes it easier to coordinate control plane and Envoy rollouts as well. This previous stable major version will be supported for exactly 1 year after the introduction of the new current stable major version, after which it will be removed from the Envoy implementation. * Optionally, the next experimental alpha major version, e.g. v4alpha. This is a release candidate for the next stable major version. This is only generated when the current stable major version requires a breaking change at the next cycle, e.g. 
a deprecation or field rename. This release candidate is mechanically generated via the [protoxform](https://github.com/envoyproxy/envoy/tree/master/tools/protoxform) tool from the current stable major version, making use of annotations such as `deprecated = true`. This is not a human editable artifact. An example of how this might play out is that at the end of December in 2020, if a v4 major version is justified, we might freeze `envoy.config.bootstrap.v4alpha` and this package would then become the current stable major version `envoy.config.bootstrap.v4`. The `envoy.config.bootstrap.v3` package will become the previous stable major version and support for `envoy.config.bootstrap.v2` will be dropped from the Envoy implementation. Note that some transitively referenced package, e.g. `envoy.config.filter.network.foo.v2` may remain at version 2 during this release, if no changes were made to the referenced package. If no major version is justified at this point, the decision to cut v4 might occur at some point in 2021 or beyond, however v2 support will still be removed at the end of 2020. The implication of this API lifecycle and clock is that any deprecated feature in the Envoy API will retain implementation support for at least 1-2 years. We are currently working on a strategy to introduce minor versions (https://github.com/envoyproxy/envoy/issues/8416). This will bump the xDS API minor version on every deprecation and field introduction/modification. This will provide an opportunity for the control plane to condition on client and major/minor API version support. Currently under discussion, but not finalized will be the sunsetting of Envoy client support for deprecated features after a year of support within a major version. Please post to https://github.com/envoyproxy/envoy/issues/8416 any thoughts around this. 
# New API features The Envoy APIs can be [safely extended](https://cloud.google.com/apis/design/compatibility) with new packages, messages, enums, fields and enum values, while maintaining [backwards compatibility](#backwards-compatibility). Additions to the API for a given package should normally only be made to the *current stable major version*. The rationale for this policy is that: * The feature is immediately available to Envoy users who consume the current stable major version. This would not be the case if the feature was placed in `vNalpha`. * `vNalpha` can be mechanically generated from `vN` without requiring developers to maintain the new feature in both locations. * We encourage Envoy users to move to the current stable major version from the previous one to consume new functionality. # When can an API change be made to a package's previous stable major version? As a pragmatic concession, we allow API feature additions to the previous stable major version for a single quarter following a major API version increment. Any changes to the previous stable major version must be manually reflected in a consistent manner in the current stable major version as well. # How to make a breaking change across major versions We maintain [backwards compatibility](#backwards-compatibility) within a major version but allow breaking changes across major versions. This enables API deprecations, cleanups, refactoring and reorganization. The Envoy APIs have a stylized workflow for achieving this. There are two prescribed methods, depending on whether the change is mechanical or manual. ## Mechanical breaking changes Field deprecations, renames, etc. are mechanical changes that are supported by the [protoxform](https://github.com/envoyproxy/envoy/tree/master/tools/protoxform) tool. These are guided by [annotations](STYLE.md#api-annotations). 
## Manual breaking changes A manual breaking change is distinct from the mechanical changes such as field deprecation, since in general it requires new code and tests to be implemented in Envoy by hand. For example, if a developer wants to unify `HeaderMatcher` with `StringMatcher` in the route configuration, this is a likely candidate for this class of change. The following steps are required: 1. The new version of the feature, e.g. the `NewHeaderMatcher` message should be added, together with referencing fields, in the current stable major version for the route configuration proto. 2. The Envoy implementation should be changed to consume configuration from the fields added in (1). Translation code (and tests) should be written to map from the existing field and messages to (1). 3. The old message/enum/field/enum value should be annotated as deprecated. 4. At the next major version, `protoxform` will remove the deprecated version automatically. This make-before-break approach ensures that API major version releases are predictable and mechanical, and has the bulk of the Envoy code and test changes owned by feature developers, rather than the API owners. There will be no major `vN` initiative to address technical debt beyond that enabled by the above process. # Client features Not all clients will support all fields and features in a given major API version. In general, it is preferable to use Protobuf semantics to support this, for example: * Ignoring a field's contents is sufficient to indicate that the support is missing in a client. * Setting both deprecated and the new method for expressing a field if support for a range of clients is desired (where this does not involve huge overhead or gymnastics). This approach does not always work, for example: * A route matcher conjunct condition should not be ignored just because the client is missing the ability to implement the match; this might result in route policy bypass. 
* A client may expect the server to provide a response in a certain format or encoding, for example a JSON encoded `Struct`-in-`Any` representation of opaque extension configuration. For this purpose, we have [client features](https://www.envoyproxy.io/docs/envoy/latest/api/client_features). # One Definition Rule (ODR) To avoid maintaining more than two stable major versions of a package, and to cope with diamond dependencies, we add a restriction on how packages may be referenced transitively; a package may have at most one version of another package in its transitive dependency set. This implies that some packages will have a major version bump during a release cycle simply to allow them to catch up to the current stable version of their dependencies. Some of this complexity and churn can be avoided by having strict rules on how packages may reference each other. Package organization and `BUILD` visibility constraints should be used to maintain a shallow depth in the dependency tree for any given package. # Minimizing the impact of churn In addition to stability, the API versioning policy has an explicit goal of minimizing the developer overhead for the Envoy community, other clients of the APIs (e.g. gRPC), management server vendors and the wider API tooling ecosystem. A certain amount of API churn between major versions is desirable to reduce technical debt and to support API evolution, but too much creates costs and barriers to upgrade. We consider deprecations to be *mandatory changes*. Any deprecation will be removed at the next stable API version. Other mechanical breaking changes are considered *discretionary*. These include changes such as field renames and are largely reflected in protobuf comments. The `protoxform` tool may decide to minimize API churn by deferring application of discretionary changes until a major version cycle where the respective message is undergoing a mandatory change. 
The Envoy API structure helps with minimizing churn between versions. Developers should architect and split packages such that high churn protos, e.g. HTTP connection manager, are isolated in packages and have a shallow reference hierarchy. ================================================ FILE: api/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@rules_proto//proto:defs.bzl", "proto_library") licenses(["notice"]) # Apache 2 proto_library( name = "v2_protos", visibility = ["//visibility:public"], deps = [ "//envoy/admin/v2alpha:pkg", "//envoy/api/v2:pkg", "//envoy/api/v2/auth:pkg", "//envoy/api/v2/cluster:pkg", "//envoy/api/v2/core:pkg", "//envoy/api/v2/endpoint:pkg", "//envoy/api/v2/listener:pkg", "//envoy/api/v2/ratelimit:pkg", "//envoy/api/v2/route:pkg", "//envoy/config/accesslog/v2:pkg", "//envoy/config/bootstrap/v2:pkg", "//envoy/config/cluster/aggregate/v2alpha:pkg", "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/cluster/redis:pkg", "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/common/tap/v2alpha:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/filter/dubbo/router/v2alpha1:pkg", "//envoy/config/filter/fault/v2:pkg", "//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg", "//envoy/config/filter/http/aws_lambda/v2alpha:pkg", "//envoy/config/filter/http/aws_request_signing/v2alpha:pkg", "//envoy/config/filter/http/buffer/v2:pkg", "//envoy/config/filter/http/cache/v2alpha:pkg", "//envoy/config/filter/http/compressor/v2:pkg", "//envoy/config/filter/http/cors/v2:pkg", "//envoy/config/filter/http/csrf/v2:pkg", "//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/filter/http/dynamo/v2:pkg", "//envoy/config/filter/http/ext_authz/v2:pkg", "//envoy/config/filter/http/fault/v2:pkg", "//envoy/config/filter/http/grpc_http1_bridge/v2:pkg", 
"//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg", "//envoy/config/filter/http/grpc_stats/v2alpha:pkg", "//envoy/config/filter/http/grpc_web/v2:pkg", "//envoy/config/filter/http/gzip/v2:pkg", "//envoy/config/filter/http/header_to_metadata/v2:pkg", "//envoy/config/filter/http/health_check/v2:pkg", "//envoy/config/filter/http/ip_tagging/v2:pkg", "//envoy/config/filter/http/jwt_authn/v2alpha:pkg", "//envoy/config/filter/http/lua/v2:pkg", "//envoy/config/filter/http/on_demand/v2:pkg", "//envoy/config/filter/http/original_src/v2alpha1:pkg", "//envoy/config/filter/http/rate_limit/v2:pkg", "//envoy/config/filter/http/rbac/v2:pkg", "//envoy/config/filter/http/router/v2:pkg", "//envoy/config/filter/http/squash/v2:pkg", "//envoy/config/filter/http/tap/v2alpha:pkg", "//envoy/config/filter/http/transcoder/v2:pkg", "//envoy/config/filter/listener/http_inspector/v2:pkg", "//envoy/config/filter/listener/original_dst/v2:pkg", "//envoy/config/filter/listener/original_src/v2alpha1:pkg", "//envoy/config/filter/listener/proxy_protocol/v2:pkg", "//envoy/config/filter/listener/tls_inspector/v2:pkg", "//envoy/config/filter/network/client_ssl_auth/v2:pkg", "//envoy/config/filter/network/direct_response/v2:pkg", "//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg", "//envoy/config/filter/network/echo/v2:pkg", "//envoy/config/filter/network/ext_authz/v2:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", "//envoy/config/filter/network/kafka_broker/v2alpha1:pkg", "//envoy/config/filter/network/local_rate_limit/v2alpha:pkg", "//envoy/config/filter/network/mongo_proxy/v2:pkg", "//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg", "//envoy/config/filter/network/rate_limit/v2:pkg", "//envoy/config/filter/network/rbac/v2:pkg", "//envoy/config/filter/network/redis_proxy/v2:pkg", "//envoy/config/filter/network/sni_cluster/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", 
"//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", "//envoy/config/health_checker/redis/v2:pkg", "//envoy/config/listener/v2:pkg", "//envoy/config/metrics/v2:pkg", "//envoy/config/overload/v2alpha:pkg", "//envoy/config/ratelimit/v2:pkg", "//envoy/config/rbac/v2:pkg", "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", "//envoy/config/retry/omit_canary_hosts/v2:pkg", "//envoy/config/retry/omit_host_metadata/v2:pkg", "//envoy/config/retry/previous_hosts/v2:pkg", "//envoy/config/retry/previous_priorities:pkg", "//envoy/config/trace/v2:pkg", "//envoy/config/trace/v2alpha:pkg", "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/raw_buffer/v2:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", "//envoy/data/accesslog/v2:pkg", "//envoy/data/cluster/v2alpha:pkg", "//envoy/data/core/v2alpha:pkg", "//envoy/data/dns/v2alpha:pkg", "//envoy/data/tap/v2alpha:pkg", "//envoy/service/accesslog/v2:pkg", "//envoy/service/auth/v2:pkg", "//envoy/service/discovery/v2:pkg", "//envoy/service/event_reporting/v2alpha:pkg", "//envoy/service/load_stats/v2:pkg", "//envoy/service/metrics/v2:pkg", "//envoy/service/ratelimit/v2:pkg", "//envoy/service/status/v2:pkg", "//envoy/service/tap/v2alpha:pkg", "//envoy/service/trace/v2:pkg", "//envoy/type:pkg", "//envoy/type/matcher:pkg", "//envoy/type/metadata/v2:pkg", "//envoy/type/tracing/v2:pkg", ], ) proto_library( name = "v3_protos", visibility = ["//visibility:public"], deps = [ "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", 
"//envoy/config/filter/thrift/router/v2alpha1:pkg", "//envoy/config/grpc_credential/v3:pkg", "//envoy/config/health_checker/redis/v2:pkg", "//envoy/config/listener/v3:pkg", "//envoy/config/metrics/v3:pkg", "//envoy/config/overload/v3:pkg", "//envoy/config/ratelimit/v3:pkg", "//envoy/config/rbac/v3:pkg", "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", "//envoy/config/retry/omit_canary_hosts/v2:pkg", "//envoy/config/retry/previous_hosts/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/config/tap/v3:pkg", "//envoy/config/trace/v3:pkg", "//envoy/data/accesslog/v3:pkg", "//envoy/data/cluster/v3:pkg", "//envoy/data/core/v3:pkg", "//envoy/data/dns/v3:pkg", "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", "//envoy/extensions/access_loggers/wasm/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", "//envoy/extensions/compression/gzip/compressor/v3:pkg", "//envoy/extensions/compression/gzip/decompressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", "//envoy/extensions/filters/http/cache/v3alpha:pkg", "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", "//envoy/extensions/filters/http/decompressor/v3:pkg", 
"//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", "//envoy/extensions/filters/http/fault/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", "//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg", "//envoy/extensions/filters/http/grpc_stats/v3:pkg", "//envoy/extensions/filters/http/grpc_web/v3:pkg", "//envoy/extensions/filters/http/gzip/v3:pkg", "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", "//envoy/extensions/filters/http/health_check/v3:pkg", "//envoy/extensions/filters/http/ip_tagging/v3:pkg", "//envoy/extensions/filters/http/jwt_authn/v3:pkg", "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", "//envoy/extensions/filters/http/lua/v3:pkg", "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", "//envoy/extensions/filters/http/on_demand/v3:pkg", "//envoy/extensions/filters/http/original_src/v3:pkg", "//envoy/extensions/filters/http/ratelimit/v3:pkg", "//envoy/extensions/filters/http/rbac/v3:pkg", "//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", "//envoy/extensions/filters/listener/original_dst/v3:pkg", "//envoy/extensions/filters/listener/original_src/v3:pkg", "//envoy/extensions/filters/listener/proxy_protocol/v3:pkg", "//envoy/extensions/filters/listener/tls_inspector/v3:pkg", "//envoy/extensions/filters/network/client_ssl_auth/v3:pkg", "//envoy/extensions/filters/network/direct_response/v3:pkg", "//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg", "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", "//envoy/extensions/filters/network/echo/v3:pkg", "//envoy/extensions/filters/network/ext_authz/v3:pkg", 
"//envoy/extensions/filters/network/http_connection_manager/v3:pkg", "//envoy/extensions/filters/network/kafka_broker/v3:pkg", "//envoy/extensions/filters/network/local_ratelimit/v3:pkg", "//envoy/extensions/filters/network/mongo_proxy/v3:pkg", "//envoy/extensions/filters/network/mysql_proxy/v3:pkg", "//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/stat_sinks/wasm/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", "//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/extensions/upstreams/http/generic/v3:pkg", "//envoy/extensions/upstreams/http/http/v3:pkg", 
"//envoy/extensions/upstreams/http/tcp/v3:pkg", "//envoy/extensions/wasm/v3:pkg", "//envoy/extensions/watchdog/abort_action/v3alpha:pkg", "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", "//envoy/service/cluster/v3:pkg", "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", "//envoy/service/load_stats/v3:pkg", "//envoy/service/metrics/v3:pkg", "//envoy/service/ratelimit/v3:pkg", "//envoy/service/route/v3:pkg", "//envoy/service/runtime/v3:pkg", "//envoy/service/secret/v3:pkg", "//envoy/service/status/v3:pkg", "//envoy/service/tap/v3:pkg", "//envoy/service/trace/v3:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", ], ) proto_library( name = "all_protos", visibility = ["//visibility:public"], deps = [ ":v2_protos", ":v3_protos", ], ) ================================================ FILE: api/CONTRIBUTING.md ================================================ # Contributing guide ## API changes All API changes should follow the [style guide](STYLE.md). API changes are regular PRs in https://github.com/envoyproxy/envoy for the API/configuration changes. They may be as part of a larger implementation PR. Please follow the standard Bazel and CI process for validating build/test sanity of `api/` before submitting a PR. *Note: New .proto files should be added to [BUILD](https://github.com/envoyproxy/envoy/blob/master/api/versioning/BUILD) in order to get the RSTs generated.* ## Documentation changes The Envoy project takes documentation seriously. We view it as one of the reasons the project has seen rapid adoption. As such, it is required that all features have complete documentation. 
This is generally going to be a combination of API documentation as well as architecture/overview documentation. ### Building documentation locally The documentation can be built locally in the root of https://github.com/envoyproxy/envoy via: ``` docs/build.sh ``` To skip configuration examples validation: ``` SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh ``` Or to use a hermetic Docker container: ``` ./ci/run_envoy_docker.sh './ci/do_ci.sh docs' ``` This process builds RST documentation directly from the proto files, merges it with the static RST files, and then runs [Sphinx](https://www.sphinx-doc.org/en/stable/rest.html) over the entire tree to produce the final documentation. The generated RST files are not committed as they are regenerated every time the documentation is built. ### Viewing documentation Once the documentation is built, it is available rooted at `generated/docs/index.html`. The generated RST files are also viewable in `generated/rst`. Note also that the generated documentation can be viewed in CI: 1. Open docs job in CircleCI. 2. Navigate to "artifacts" tab. 3. Expand files and click on `index.html`. If you do not see an artifacts tab this is a bug in CircleCI. Try logging out and logging back in. ### Documentation guidelines The following are some general guidelines around documentation. * Cross link as much as possible. Sphinx is fantastic at this. Use it! See ample examples with the existing documentation as a guide. * Please use a **single space** after a period in documentation so that all generated text is consistent. * Comments can be left inside comments if needed (that's pretty deep, right?) via the `[#comment:]` special tag. E.g., ``` // This is a really cool field! // [#comment:TODO(mattklein123): Do something cooler] string foo_field = 3; ``` * Prefer *italics* for emphasis as `backtick` emphasis is somewhat jarring in our Sphinx theme. * All documentation is expected to use proper English grammar with proper punctuation. 
If you are not a fluent English speaker please let us know and we will help out. ================================================ FILE: api/README.md ================================================ # Data plane API This tree hosts the configuration and APIs that drive [Envoy](https://www.envoyproxy.io/). The APIs are also in some cases used by other proxy solutions that aim to interoperate with management systems and configuration generators that are built against this standard. Thus, we consider these a set of *universal data plane* APIs. See [this](https://medium.com/@mattklein123/the-universal-data-plane-api-d15cec7a) blog post for more information on the universal data plane concept. # Repository structure The API tree can be found at two locations: * https://github.com/envoyproxy/envoy/tree/master/api - canonical read/write home for the APIs. * https://github.com/envoyproxy/data-plane-api - read-only mirror of https://github.com/envoyproxy/envoy/tree/master/api, providing the ability to consume the data plane APIs without the Envoy implementation. # Further API reading * [API overview for developers](API_OVERVIEW.md) * [API overview for users](https://www.envoyproxy.io/docs/envoy/latest/configuration/overview/v2_overview#) * [xDS protocol overview](https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol) * [Contributing guide](CONTRIBUTING.md) ================================================ FILE: api/STYLE.md ================================================ # API style guidelines Generally follow guidance at https://cloud.google.com/apis/design/, in particular for proto3 as described at: * https://cloud.google.com/apis/design/proto3 * https://cloud.google.com/apis/design/naming_convention * https://developers.google.com/protocol-buffers/docs/style A key aspect of our API style is maintaining stability by following the [API versioning guidelines](API_VERSIONING.md). 
All developers must familiarize themselves with these guidelines, any PR which makes breaking changes to the API will not be merged. In addition, the following conventions should be followed: * Every proto directory should have a `README.md` describing its content. See for example [envoy.service](envoy/service/README.md). * The data plane APIs are primarily intended for machine generation and consumption. It is expected that the management server is responsible for mapping higher level configuration concepts to concrete API concepts. Similarly, static configuration fragments may be generated by tools and UIs, etc. The APIs and tools used to generate xDS configuration are beyond the scope of the definitions in this repository. * Use [wrapped scalar types](https://github.com/google/protobuf/blob/master/src/google/protobuf/wrappers.proto) where there is a real need for the field to have a default value that does not match the proto3 defaults (0/false/""). This should not be done for fields where the proto3 defaults make sense. All things being equal, pick appropriate logic, e.g. enable vs. disable for a `bool` field, such that the proto3 defaults work, but only where this doesn't result in API gymnastics. * Use a `[#not-implemented-hide:]` `protodoc` annotation in comments for fields that lack Envoy implementation. These indicate that the entity is not implemented in Envoy and the entity should be hidden from the Envoy documentation. * Always use plural field names for `repeated` fields, such as `filters`. * Due to the fact that we consider JSON/YAML to be first class inputs, we cannot easily change a a singular field to a repeated field (both due to JSON/YAML array structural differences as well as singular vs. plural field naming). If there is a reasonable expectation that a field may need to be repeated in the future, but we don't need it to be repeated right away, consider making it repeated now but using constraints to enforce a maximum repeated size of 1. 
E.g.: ```proto repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1, max_items: 1}]; ``` * Always use upper camel case names for message types and enum types without embedded acronyms, such as `HttpRequest`. * Prefer `oneof` selections to boolean overloads of fields, for example, prefer: ```proto oneof path_specifier { string simple_path = 1; string regex_path = 2; } ``` to ```proto string path = 1; bool path_is_regex = 2; ``` This is more efficient, extendable and self-describing. * The API includes two types for representing [percents](envoy/type/percent.proto). `Percent` is effectively a double value in the range 0.0-100.0. `FractionalPercent` is an integral fraction that can be used to create a truncated percentage also in the range 0.0-100.0. In high performance paths, `FractionalPercent` is preferred as randomness calculations can be performed using integral modulo and comparison operations only without any floating point conversions. Typically, most users do not need infinite precision in these paths. * For enum types, if one of the enum values is used for most cases, make it the first enum value with `0` numeric value. Otherwise, define the first enum value like `TYPE_NAME_UNSPECIFIED = 0`, and treat it as an error. This design pattern forces developers to explicitly choose the correct enum value for their use case, and avoid misunderstanding of the default behavior. * Proto fields should be sorted logically, not by field number. ## Package organization API definitions are layered hierarchically in packages from top-to-bottom as following: - `envoy.extensions` contains all definitions for the extensions, the package should match the structure of the `source` directory. - `envoy.service` contains gRPC definitions of supporting services and top-level messages for the services. e.g. `envoy.service.route.v3` contains RDS, `envoy.service.listener.v3` contains LDS. 
- `envoy.config` contains other definitions for service configuration, bootstrap and some legacy core types. - `envoy.data` contains data format declaration for data types that Envoy produces. - `envoy.type` contains common protobuf types such as percent, range and matchers. Extensions should use the regular hierarchy. For example, configuration for network filters belongs in a package under `envoy.extensions.filter.network`. ## Adding an extension configuration to the API Extensions must currently be added as v3 APIs following the [package organization](#package-organization) above. To add an extension config to the API, the steps below should be followed: 1. If this is still WiP and subject to breaking changes, use `vNalpha` instead of `vN` in steps below. Refer to the [Cache filter config](envoy/extensions/filter/http/cache/v3alpha/cache.proto) as an example of `v3alpha`, and the [Buffer filter config](envoy/extensions/filter/http/buffer/v3/buffer.proto) as an example of `v3`. 1. Place the v3 extension configuration `.proto` in `api/envoy/extensions`, e.g. `api/envoy/extensions/filter/http/foobar/v3/foobar.proto` together with an initial BUILD file: ```bazel load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ``` 1. Add to the v3 extension config proto `import "udpa/annotations/migrate.proto";` and `import "udpa/annotations/status.proto";` 1. If this is still WiP and subject to breaking changes, set `option (udpa.annotations.file_status).work_in_progress = true;`. 1. Add to the v3 extension config proto a file level `option (udpa.annotations.file_status).package_version_status = ACTIVE;`. This is required to automatically include the config proto in [api/versioning/BUILD](versioning/BUILD). 1. Add a reference to the v3 extension config in (1) in [api/versioning/BUILD](versioning/BUILD) under `active_protos`. 1. 
Run `./tools/proto_format/proto_format.sh fix`. This should regenerate the `BUILD` file, reformat `foobar.proto` as needed and also generate the v4alpha extension config (if needed), together with shadow API protos. 1. `git add api/ generated_api_shadow/` to add any new files to your Git index. ## API annotations A number of annotations are used in the Envoy APIs to provide additional API metadata. We describe these annotations below by category. ### Field level * `[deprecated = true]` to denote fields that are deprecated in a major version. These fields are slated for removal at the next major cycle and follow the [breaking change policy](../CONTRIBUTING.md#breaking-change-policy). * `[envoy.annotations.disallowed_by_default = true]` to denote fields that have been disallowed by default as per the [breaking change policy](../CONTRIBUTING.md#breaking-change-policy). * `[(udpa.annotations.field_migrate).rename = ""]` to denote that the field will be renamed to a given name in the next API major version. * `[(udpa.annotations.field_migrate).oneof_promotion = ""]` to denote that the field will be promoted to a given `oneof` in the next API major version. * `[(udpa.annotations.sensitive) = true]` to denote sensitive fields that should be redacted in output such as logging or configuration dumps. * [PGV annotations](https://github.com/envoyproxy/protoc-gen-validate) to denote field value constraints. ### Enum value level * `[(udpa.annotations.enum_value_migrate).rename = "new enum value name"]` to denote that the enum value will be renamed to a given name in the next API major version. ### Message level * `option (udpa.annotations.versioning).previous_message_type = "";` to denote the previous type name for an upgraded message. You should never have to write these manually, they are generated by `protoxform`. ### Service level * `option (envoy.annotations.resource).type = "";` to denote the resource type for an xDS service definition. 
### File level * `option (udpa.annotations.file_migrate).move_to_package = "";` to denote that in the next major version of the API, the file will be moved to the given package. This is consumed by `protoxform`. * `option (udpa.annotations.file_status).work_in_progress = true;` to denote a file that is still work-in-progress and subject to breaking changes. ================================================ FILE: api/bazel/BUILD ================================================ load("@io_bazel_rules_go//proto:compiler.bzl", "go_proto_compiler") licenses(["notice"]) # Apache 2 go_proto_compiler( name = "pgv_plugin_go", options = ["lang=go"], plugin = "@com_envoyproxy_protoc_gen_validate//:protoc-gen-validate", suffix = ".pb.validate.go", valid_archive = False, visibility = ["//visibility:public"], ) ================================================ FILE: api/bazel/api_build_system.bzl ================================================ load("@rules_cc//cc:defs.bzl", "cc_test") load("@com_envoyproxy_protoc_gen_validate//bazel:pgv_proto_library.bzl", "pgv_cc_proto_library") load("@com_github_grpc_grpc//bazel:cc_grpc_library.bzl", "cc_grpc_library") load("@com_google_protobuf//:protobuf.bzl", _py_proto_library = "py_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") load("@io_bazel_rules_go//go:def.bzl", "go_test") load("@rules_proto//proto:defs.bzl", "proto_library") load( "//bazel:external_proto_deps.bzl", "EXTERNAL_PROTO_CC_BAZEL_DEP_MAP", "EXTERNAL_PROTO_GO_BAZEL_DEP_MAP", "EXTERNAL_PROTO_PY_BAZEL_DEP_MAP", ) _PY_PROTO_SUFFIX = "_py_proto" _CC_PROTO_SUFFIX = "_cc_proto" _CC_GRPC_SUFFIX = "_cc_grpc" _GO_PROTO_SUFFIX = "_go_proto" _GO_IMPORTPATH_PREFIX = "github.com/envoyproxy/go-control-plane/" _COMMON_PROTO_DEPS = [ "@com_google_protobuf//:any_proto", "@com_google_protobuf//:descriptor_proto", "@com_google_protobuf//:duration_proto", "@com_google_protobuf//:empty_proto", "@com_google_protobuf//:struct_proto", 
"@com_google_protobuf//:timestamp_proto", "@com_google_protobuf//:wrappers_proto", "@com_google_googleapis//google/api:http_proto", "@com_google_googleapis//google/api:httpbody_proto", "@com_google_googleapis//google/api:annotations_proto", "@com_google_googleapis//google/rpc:status_proto", "@com_envoyproxy_protoc_gen_validate//validate:validate_proto", ] def _proto_mapping(dep, proto_dep_map, proto_suffix): mapped = proto_dep_map.get(dep) if mapped == None: prefix = "@" + Label(dep).workspace_name if not dep.startswith("//") else "" return prefix + "//" + Label(dep).package + ":" + Label(dep).name + proto_suffix return mapped def _go_proto_mapping(dep): return _proto_mapping(dep, EXTERNAL_PROTO_GO_BAZEL_DEP_MAP, _GO_PROTO_SUFFIX) def _cc_proto_mapping(dep): return _proto_mapping(dep, EXTERNAL_PROTO_CC_BAZEL_DEP_MAP, _CC_PROTO_SUFFIX) def _py_proto_mapping(dep): return _proto_mapping(dep, EXTERNAL_PROTO_PY_BAZEL_DEP_MAP, _PY_PROTO_SUFFIX) # TODO(htuch): Convert this to native py_proto_library once # https://github.com/bazelbuild/bazel/issues/3935 and/or # https://github.com/bazelbuild/bazel/issues/2626 are resolved. def _api_py_proto_library(name, srcs = [], deps = []): _py_proto_library( name = name + _PY_PROTO_SUFFIX, srcs = srcs, default_runtime = "@com_google_protobuf//:protobuf_python", protoc = "@com_google_protobuf//:protoc", deps = [_py_proto_mapping(dep) for dep in deps] + [ "@com_envoyproxy_protoc_gen_validate//validate:validate_py", "@com_google_googleapis//google/rpc:status_py_proto", "@com_google_googleapis//google/api:annotations_py_proto", "@com_google_googleapis//google/api:http_py_proto", "@com_google_googleapis//google/api:httpbody_py_proto", ], visibility = ["//visibility:public"], ) # This defines googleapis py_proto_library. The repository does not provide its definition and requires # overriding it in the consuming project (see https://github.com/grpc/grpc/issues/19255 for more details). 
def py_proto_library(name, deps = [], plugin = None): srcs = [dep[:-6] + ".proto" if dep.endswith("_proto") else dep for dep in deps] proto_deps = [] # py_proto_library in googleapis specifies *_proto rules in dependencies. # By rewriting *_proto to *.proto above, the dependencies in *_proto rules are not preserved. # As a workaround, manually specify the proto dependencies for the imported python rules. if name == "annotations_py_proto": proto_deps = proto_deps + [":http_py_proto"] # checked.proto depends on syntax.proto, we have to add this dependency manually as well. if name == "checked_py_proto": proto_deps = proto_deps + [":syntax_py_proto"] # py_proto_library does not support plugin as an argument yet at gRPC v1.25.0: # https://github.com/grpc/grpc/blob/v1.25.0/bazel/python_rules.bzl#L72. # plugin should also be passed in here when gRPC version is greater than v1.25.x. _py_proto_library( name = name, srcs = srcs, default_runtime = "@com_google_protobuf//:protobuf_python", protoc = "@com_google_protobuf//:protoc", deps = proto_deps + ["@com_google_protobuf//:protobuf_python"], visibility = ["//visibility:public"], ) def _api_cc_grpc_library(name, proto, deps = []): cc_grpc_library( name = name, srcs = [proto], deps = deps, proto_only = False, grpc_only = True, visibility = ["//visibility:public"], ) def api_cc_py_proto_library( name, visibility = ["//visibility:private"], srcs = [], deps = [], linkstatic = 0, has_services = 0): relative_name = ":" + name proto_library( name = name, srcs = srcs, deps = deps + _COMMON_PROTO_DEPS, visibility = visibility, ) cc_proto_library_name = name + _CC_PROTO_SUFFIX pgv_cc_proto_library( name = cc_proto_library_name, linkstatic = linkstatic, cc_deps = [_cc_proto_mapping(dep) for dep in deps] + [ "@com_google_googleapis//google/api:http_cc_proto", "@com_google_googleapis//google/api:httpbody_cc_proto", "@com_google_googleapis//google/api:annotations_cc_proto", "@com_google_googleapis//google/rpc:status_cc_proto", ], deps = 
[relative_name], visibility = ["//visibility:public"], ) _api_py_proto_library(name, srcs, deps) # Optionally define gRPC services if has_services: # TODO: when Python services are required, add to the below stub generations. cc_grpc_name = name + _CC_GRPC_SUFFIX cc_proto_deps = [cc_proto_library_name] + [_cc_proto_mapping(dep) for dep in deps] _api_cc_grpc_library(name = cc_grpc_name, proto = relative_name, deps = cc_proto_deps) def api_cc_test(name, **kwargs): cc_test( name = name, **kwargs ) def api_go_test(name, **kwargs): go_test( name = name, **kwargs ) def api_proto_package( name = "pkg", srcs = [], deps = [], has_services = False, visibility = ["//visibility:public"]): if srcs == []: srcs = native.glob(["*.proto"]) name = "pkg" api_cc_py_proto_library( name = name, visibility = visibility, srcs = srcs, deps = deps, has_services = has_services, ) compilers = ["@io_bazel_rules_go//proto:go_proto", "@envoy_api//bazel:pgv_plugin_go"] if has_services: compilers = ["@io_bazel_rules_go//proto:go_grpc", "@envoy_api//bazel:pgv_plugin_go"] # Because RBAC proro depends on googleapis syntax.proto and checked.proto, # which share the same go proto library, it causes duplicative dependencies. # Thus, we use depset().to_list() to remove duplicated depenencies. 
go_proto_library( name = name + _GO_PROTO_SUFFIX, compilers = compilers, importpath = _GO_IMPORTPATH_PREFIX + native.package_name(), proto = name, visibility = ["//visibility:public"], deps = depset([_go_proto_mapping(dep) for dep in deps] + [ "@com_github_golang_protobuf//ptypes:go_default_library", "@com_github_golang_protobuf//ptypes/any:go_default_library", "@com_github_golang_protobuf//ptypes/duration:go_default_library", "@com_github_golang_protobuf//ptypes/struct:go_default_library", "@com_github_golang_protobuf//ptypes/timestamp:go_default_library", "@com_github_golang_protobuf//ptypes/wrappers:go_default_library", "@com_envoyproxy_protoc_gen_validate//validate:go_default_library", "@com_google_googleapis//google/api:annotations_go_proto", "@com_google_googleapis//google/rpc:status_go_proto", ]).to_list(), ) ================================================ FILE: api/bazel/envoy_http_archive.bzl ================================================ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") def envoy_http_archive(name, locations, **kwargs): # `existing_rule_keys` contains the names of repositories that have already # been defined in the Bazel workspace. By skipping repos with existing keys, # users can override dependency versions by using standard Bazel repository # rules in their WORKSPACE files. existing_rule_keys = native.existing_rules().keys() if name in existing_rule_keys: # This repository has already been defined, probably because the user # wants to override the version. Do nothing. return loc_key = kwargs.pop("repository_key", name) location = locations[loc_key] # HTTP tarball at a given URL. Add a BUILD file if requested. 
http_archive( name = name, urls = location["urls"], sha256 = location["sha256"], strip_prefix = location.get("strip_prefix", ""), **kwargs ) ================================================ FILE: api/bazel/external_proto_deps.bzl ================================================ # Any external dependency imported in the api/ .protos requires entries in # the maps below, to allow the Bazel proto and language specific bindings to be # inferred from the import directives. # # This file needs to be interpreted as both Python 3 and Starlark, so only the # common subset of Python should be used. # This maps from .proto import directive path to the Bazel dependency path for # external dependencies. Since BUILD files are generated, this is the canonical # place to define this mapping. EXTERNAL_PROTO_IMPORT_BAZEL_DEP_MAP = { "google/api/expr/v1alpha1/checked.proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "google/api/expr/v1alpha1/syntax.proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", "metrics.proto": "@prometheus_metrics_model//:client_model", "opencensus/proto/trace/v1/trace.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", "opencensus/proto/trace/v1/trace_config.proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", } # This maps from the Bazel proto_library target to the Go language binding target for external dependencies. 
EXTERNAL_PROTO_GO_BAZEL_DEP_MAP = {
    "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto",
    "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:expr_go_proto",
    "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_go",
    "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_and_config_proto_go",
}

# This maps from the Bazel proto_library target to the C++ language binding target for external dependencies.
EXTERNAL_PROTO_CC_BAZEL_DEP_MAP = {
    "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_cc_proto",
    "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_cc_proto",
    "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_cc",
    "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_cc",
}

# This maps from the Bazel proto_library target to the Python language binding target for external dependencies.
EXTERNAL_PROTO_PY_BAZEL_DEP_MAP = { "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto": "@com_google_googleapis//google/api/expr/v1alpha1:checked_py_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto": "@com_google_googleapis//google/api/expr/v1alpha1:syntax_py_proto", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_proto_py", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto": "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto_py", } ================================================ FILE: api/bazel/repositories.bzl ================================================ load(":envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "REPOSITORY_LOCATIONS") def api_dependencies(): envoy_http_archive( "bazel_skylib", locations = REPOSITORY_LOCATIONS, ) envoy_http_archive( "com_envoyproxy_protoc_gen_validate", locations = REPOSITORY_LOCATIONS, ) envoy_http_archive( name = "com_google_googleapis", locations = REPOSITORY_LOCATIONS, ) envoy_http_archive( name = "com_github_cncf_udpa", locations = REPOSITORY_LOCATIONS, ) envoy_http_archive( name = "prometheus_metrics_model", locations = REPOSITORY_LOCATIONS, build_file_content = PROMETHEUSMETRICS_BUILD_CONTENT, ) envoy_http_archive( name = "opencensus_proto", locations = REPOSITORY_LOCATIONS, ) envoy_http_archive( name = "rules_proto", locations = REPOSITORY_LOCATIONS, ) envoy_http_archive( name = "com_github_openzipkin_zipkinapi", locations = REPOSITORY_LOCATIONS, build_file_content = ZIPKINAPI_BUILD_CONTENT, ) PROMETHEUSMETRICS_BUILD_CONTENT = """ load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") api_cc_py_proto_library( name = "client_model", srcs = [ "metrics.proto", ], visibility = ["//visibility:public"], ) go_proto_library( name = "client_model_go_proto", importpath = 
"github.com/prometheus/client_model/go", proto = ":client_model", visibility = ["//visibility:public"], ) """ OPENCENSUSTRACE_BUILD_CONTENT = """ load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") api_cc_py_proto_library( name = "trace_model", srcs = [ "trace.proto", ], visibility = ["//visibility:public"], ) go_proto_library( name = "trace_model_go_proto", importpath = "trace_model", proto = ":trace_model", visibility = ["//visibility:public"], ) """ ZIPKINAPI_BUILD_CONTENT = """ load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") api_cc_py_proto_library( name = "zipkin", srcs = [ "zipkin-jsonv2.proto", "zipkin.proto", ], visibility = ["//visibility:public"], ) go_proto_library( name = "zipkin_go_proto", proto = ":zipkin", visibility = ["//visibility:public"], ) """ ================================================ FILE: api/bazel/repository_locations.bzl ================================================ DEPENDENCY_REPOSITORIES_SPEC = dict( bazel_skylib = dict( project_name = "bazel-skylib", project_desc = "Common useful functions and rules for Bazel", project_url = "https://github.com/bazelbuild/bazel-skylib", version = "1.0.3", sha256 = "1c531376ac7e5a180e0237938a2536de0c54d93f5c278634818e0efc952dd56c", urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/{version}/bazel-skylib-{version}.tar.gz"], last_updated = "2020-08-27", use_category = ["api"], ), com_envoyproxy_protoc_gen_validate = dict( project_name = "protoc-gen-validate (PGV)", project_desc = "protoc plugin to generate polyglot message validators", project_url = "https://github.com/envoyproxy/protoc-gen-validate", version = "278964a8052f96a2f514add0298098f63fb7f47f", sha256 = "e368733c9fb7f8489591ffaf269170d7658cc0cd1ee322b601512b769446d3c8", strip_prefix = "protoc-gen-validate-{version}", urls = 
["https://github.com/envoyproxy/protoc-gen-validate/archive/{version}.tar.gz"], last_updated = "2020-06-09", use_category = ["api"], ), com_github_cncf_udpa = dict( project_name = "Universal Data Plane API", project_desc = "Universal Data Plane API Working Group (UDPA-WG)", project_url = "https://github.com/cncf/udpa", version = "0.0.1", sha256 = "83a7dcc316d741031f34c0409021432b74a39c4811845a177133f02f948fe2d8", strip_prefix = "udpa-{version}", urls = ["https://github.com/cncf/udpa/archive/v{version}.tar.gz"], last_updated = "2020-09-23", use_category = ["api"], ), com_github_openzipkin_zipkinapi = dict( project_name = "Zipkin API", project_desc = "Zipkin's language independent model and HTTP Api Definitions", project_url = "https://github.com/openzipkin/zipkin-api", version = "0.2.2", sha256 = "688c4fe170821dd589f36ec45aaadc03a618a40283bc1f97da8fa11686fc816b", strip_prefix = "zipkin-api-{version}", urls = ["https://github.com/openzipkin/zipkin-api/archive/{version}.tar.gz"], last_updated = "2020-09-23", use_category = ["api"], ), com_google_googleapis = dict( # TODO(dio): Consider writing a Starlark macro for importing Google API proto. 
project_name = "Google APIs", project_desc = "Public interface definitions of Google APIs", project_url = "https://github.com/googleapis/googleapis", version = "82944da21578a53b74e547774cf62ed31a05b841", sha256 = "a45019af4d3290f02eaeb1ce10990166978c807cb33a9692141a076ba46d1405", strip_prefix = "googleapis-{version}", urls = ["https://github.com/googleapis/googleapis/archive/{version}.tar.gz"], last_updated = "2019-12-02", use_category = ["api"], ), opencensus_proto = dict( project_name = "OpenCensus Proto", project_desc = "Language Independent Interface Types For OpenCensus", project_url = "https://github.com/census-instrumentation/opencensus-proto", version = "0.3.0", sha256 = "b7e13f0b4259e80c3070b583c2f39e53153085a6918718b1c710caf7037572b0", strip_prefix = "opencensus-proto-{version}/src", urls = ["https://github.com/census-instrumentation/opencensus-proto/archive/v{version}.tar.gz"], last_updated = "2020-06-20", use_category = ["api"], ), prometheus_metrics_model = dict( project_name = "Prometheus client model", project_desc = "Data model artifacts for Prometheus", project_url = "https://github.com/prometheus/client_model", version = "60555c9708c786597e6b07bf846d0dc5c2a46f54", sha256 = "6748b42f6879ad4d045c71019d2512c94be3dd86f60965e9e31e44a3f464323e", strip_prefix = "client_model-{version}", urls = ["https://github.com/prometheus/client_model/archive/{version}.tar.gz"], last_updated = "2020-06-23", use_category = ["api"], ), rules_proto = dict( project_name = "Protobuf Rules for Bazel", project_desc = "Protocol buffer rules for Bazel", project_url = "https://github.com/bazelbuild/rules_proto", version = "40298556293ae502c66579620a7ce867d5f57311", sha256 = "aa1ee19226f707d44bee44c720915199c20c84a23318bb0597ed4e5c873ccbd5", strip_prefix = "rules_proto-{version}", urls = ["https://github.com/bazelbuild/rules_proto/archive/{version}.tar.gz"], last_updated = "2020-08-17", use_category = ["api"], ), ) def _format_version(s, version): return s.format(version = 
version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_")) # Interpolate {version} in the above dependency specs. This code should be capable of running in both Python # and Starlark. def _dependency_repositories(): locations = {} for key, location in DEPENDENCY_REPOSITORIES_SPEC.items(): mutable_location = dict(location) locations[key] = mutable_location # Fixup with version information. if "version" in location: if "strip_prefix" in location: mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"]) mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]] return locations REPOSITORY_LOCATIONS = _dependency_repositories() ================================================ FILE: api/envoy/admin/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2/core:pkg", "//envoy/config/bootstrap/v2:pkg", "//envoy/service/tap/v2alpha:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/admin/v2alpha/certs.proto ================================================ syntax = "proto3"; package envoy.admin.v2alpha; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "CertsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Certificates] // Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to // display certificate information. See :ref:`/certs ` for more // information. 
message Certificates {
  // List of certificates known to an Envoy.
  repeated Certificate certificates = 1;
}

message Certificate {
  // Details of CA certificate.
  repeated CertificateDetails ca_cert = 1;

  // Details of Certificate Chain.
  repeated CertificateDetails cert_chain = 2;
}

// [#next-free-field: 7]
message CertificateDetails {
  // Path of the certificate.
  string path = 1;

  // Certificate Serial Number.
  string serial_number = 2;

  // List of Subject Alternate names.
  repeated SubjectAlternateName subject_alt_names = 3;

  // Minimum number of days until expiration of the certificate and its chain.
  uint64 days_until_expiration = 4;

  // Indicates the time from which the certificate is valid.
  google.protobuf.Timestamp valid_from = 5;

  // Indicates the time at which the certificate expires.
  google.protobuf.Timestamp expiration_time = 6;
}

message SubjectAlternateName {
  // Subject Alternate Name.
  oneof name {
    string dns = 1;
    string uri = 2;
    string ip_address = 3;
  }
}



================================================
FILE: api/envoy/admin/v2alpha/clusters.proto
================================================
syntax = "proto3";

package envoy.admin.v2alpha;

import "envoy/admin/v2alpha/metrics.proto";
import "envoy/api/v2/core/address.proto";
import "envoy/api/v2/core/base.proto";
import "envoy/api/v2/core/health_check.proto";
import "envoy/type/percent.proto";

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.admin.v2alpha";
option java_outer_classname = "ClustersProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Clusters]

// Admin endpoint uses this wrapper for `/clusters` to display cluster status information.
// See :ref:`/clusters ` for more information.
message Clusters {
  // Mapping from cluster name to each cluster's status.
  repeated ClusterStatus cluster_statuses = 1;
}

// Details an individual cluster's current status.
// [#next-free-field: 6] message ClusterStatus { // Name of the cluster. string name = 1; // Denotes whether this cluster was added via API or configured statically. bool added_via_api = 2; // The success rate threshold used in the last interval. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all errors: externally and locally generated were used to calculate the threshold. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*, only externally generated errors were used to calculate the threshold. // The threshold is used to eject hosts based on their success rate. See // :ref:`Cluster outlier detection ` documentation for details. // // Note: this field may be omitted in any of the three following cases: // // 1. There were not enough hosts with enough request volume to proceed with success rate based // outlier ejection. // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.Percent success_rate_ejection_threshold = 3; // Mapping from host address to the host's current status. repeated HostStatus host_statuses = 4; // The success rate threshold used in the last interval when only locally originated failures were // taken into account and externally originated errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*. The threshold is used to eject hosts based on their success rate. // See :ref:`Cluster outlier detection ` documentation for // details. // // Note: this field may be omitted in any of the three following cases: // // 1. There were not enough hosts with enough request volume to proceed with success rate based // outlier ejection. // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. 
Outlier detection is not enabled for this cluster. type.Percent local_origin_success_rate_ejection_threshold = 5; } // Current state of a particular host. // [#next-free-field: 10] message HostStatus { // Address of this host. api.v2.core.Address address = 1; // List of stats specific to this host. repeated SimpleMetric stats = 2; // The host's current health status. HostHealthStatus health_status = 3; // Request success rate for this host over the last calculated interval. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all errors: externally and locally generated were used in success rate // calculation. If // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*, only externally generated errors were used in success rate calculation. // See :ref:`Cluster outlier detection ` documentation for // details. // // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. type.Percent success_rate = 4; // The host's weight. If not configured, the value defaults to 1. uint32 weight = 5; // The hostname of the host, if applicable. string hostname = 6; // The host's priority. If not configured, the value defaults to 0 (highest priority). uint32 priority = 7; // Request success rate for this host over the last calculated // interval when only locally originated errors are taken into account and externally originated // errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*. // See :ref:`Cluster outlier detection ` documentation for // details. // // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. 
type.Percent local_origin_success_rate = 8; // locality of the host. api.v2.core.Locality locality = 9; } // Health status for a host. // [#next-free-field: 7] message HostHealthStatus { // The host is currently failing active health checks. bool failed_active_health_check = 1; // The host is currently considered an outlier and has been ejected. bool failed_outlier_check = 2; // The host is currently being marked as degraded through active health checking. bool failed_active_degraded_check = 4; // The host has been removed from service discovery, but is being stabilized due to active // health checking. bool pending_dynamic_removal = 5; // The host has not yet been health checked. bool pending_active_hc = 6; // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported // here. // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] api.v2.core.HealthStatus eds_health_status = 3; } ================================================ FILE: api/envoy/admin/v2alpha/config_dump.proto ================================================ syntax = "proto3"; package envoy.admin.v2alpha; import "envoy/config/bootstrap/v2/bootstrap.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ConfigDumpProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ConfigDump] // The :ref:`/config_dump ` admin endpoint uses this wrapper // message to maintain and serve arbitrary configuration information from any component in Envoy. message ConfigDump { // This list is serialized and dumped in its entirety at the // :ref:`/config_dump ` endpoint. 
// // The following configurations are currently supported and will be dumped in the order given // below: // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, // :ref:`/config_dump?mask={} `, // or :ref:`/config_dump?resource={},mask={} // ` for more information. repeated google.protobuf.Any configs = 1; } message UpdateFailureState { // What the component configuration would have been if the update had succeeded. google.protobuf.Any failed_configuration = 1; // Time of the latest failed update attempt. google.protobuf.Timestamp last_update_attempt = 2; // Details about the last failed update attempt. string details = 3; } // This message describes the bootstrap configuration that Envoy was started with. This includes // any CLI overrides that were merged. Bootstrap configuration information can be used to recreate // the static portions of an Envoy configuration by reusing the output as the bootstrap // configuration for another Envoy. message BootstrapConfigDump { config.bootstrap.v2.Bootstrap bootstrap = 1; // The timestamp when the BootstrapConfig was last updated. google.protobuf.Timestamp last_updated = 2; } // Envoy's listener manager fills this message with all currently known listeners. Listener // configuration information can be used to recreate an Envoy configuration by populating all // listeners as static listeners or by returning them in a LDS response. message ListenersConfigDump { // Describes a statically loaded listener. message StaticListener { // The listener config. google.protobuf.Any listener = 1; // The timestamp when the Listener was last successfully updated. google.protobuf.Timestamp last_updated = 2; } message DynamicListenerState { // This is the per-resource version information. 
This version is currently taken from the // :ref:`version_info ` field at the time // that the listener was loaded. In the future, discrete per-listener versions may be supported // by the API. string version_info = 1; // The listener config. google.protobuf.Any listener = 2; // The timestamp when the Listener was last successfully updated. google.protobuf.Timestamp last_updated = 3; } // Describes a dynamically loaded listener via the LDS API. // [#next-free-field: 6] message DynamicListener { // The name or unique id of this listener, pulled from the DynamicListenerState config. string name = 1; // The listener state for any active listener by this name. // These are listeners that are available to service data plane traffic. DynamicListenerState active_state = 2; // The listener state for any warming listener by this name. // These are listeners that are currently undergoing warming in preparation to service data // plane traffic. Note that if attempting to recreate an Envoy configuration from a // configuration dump, the warming listeners should generally be discarded. DynamicListenerState warming_state = 3; // The listener state for any draining listener by this name. // These are listeners that are currently undergoing draining in preparation to stop servicing // data plane traffic. Note that if attempting to recreate an Envoy configuration from a // configuration dump, the draining listeners should generally be discarded. DynamicListenerState draining_state = 4; // Set if the last update failed, cleared after the next successful update. UpdateFailureState error_state = 5; } // This is the :ref:`version_info ` in the // last processed LDS discovery response. If there are only static bootstrap listeners, this field // will be "". string version_info = 1; // The statically loaded listener configs. repeated StaticListener static_listeners = 2; // State for any warming, active, or draining listeners. 
repeated DynamicListener dynamic_listeners = 3; } // Envoy's cluster manager fills this message with all currently known clusters. Cluster // configuration information can be used to recreate an Envoy configuration by populating all // clusters as static clusters or by returning them in a CDS response. message ClustersConfigDump { // Describes a statically loaded cluster. message StaticCluster { // The cluster config. google.protobuf.Any cluster = 1; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 2; } // Describes a dynamically loaded cluster via the CDS API. message DynamicCluster { // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by // the API. string version_info = 1; // The cluster config. google.protobuf.Any cluster = 2; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 3; } // This is the :ref:`version_info ` in the // last processed CDS discovery response. If there are only static bootstrap clusters, this field // will be "". string version_info = 1; // The statically loaded cluster configs. repeated StaticCluster static_clusters = 2; // The dynamically loaded active clusters. These are clusters that are available to service // data plane traffic. repeated DynamicCluster dynamic_active_clusters = 3; // The dynamically loaded warming clusters. These are clusters that are currently undergoing // warming in preparation to service data plane traffic. Note that if attempting to recreate an // Envoy configuration from a configuration dump, the warming clusters should generally be // discarded. repeated DynamicCluster dynamic_warming_clusters = 4; } // Envoy's RDS implementation fills this message with all currently loaded routes, as described by // their RouteConfiguration objects. 
Static routes that are either defined in the bootstrap configuration // or defined inline while configuring listeners are separated from those configured dynamically via RDS. // Route configuration information can be used to recreate an Envoy configuration by populating all routes // as static routes or by returning them in RDS responses. message RoutesConfigDump { message StaticRouteConfig { // The route config. google.protobuf.Any route_config = 1; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 2; } message DynamicRouteConfig { // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the route configuration was loaded. string version_info = 1; // The route config. google.protobuf.Any route_config = 2; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 3; } // The statically loaded route configs. repeated StaticRouteConfig static_route_configs = 2; // The dynamically loaded route configs. repeated DynamicRouteConfig dynamic_route_configs = 3; } // Envoy's scoped RDS implementation fills this message with all currently loaded route // configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both // the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the // dynamically obtained scopes via the SRDS API. message ScopedRoutesConfigDump { message InlineScopedRouteConfigs { // The name assigned to the scoped route configurations. string name = 1; // The scoped route configurations. repeated google.protobuf.Any scoped_route_configs = 2; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 3; } message DynamicScopedRouteConfigs { // The name assigned to the scoped route configurations. string name = 1; // This is the per-resource version information. 
This version is currently taken from the // :ref:`version_info ` field at the time that // the scoped routes configuration was loaded. string version_info = 2; // The scoped route configurations. repeated google.protobuf.Any scoped_route_configs = 3; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 4; } // The statically loaded scoped route configs. repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; // The dynamically loaded scoped route configs. repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; } // Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. message SecretsConfigDump { // DynamicSecret contains secret information fetched via SDS. message DynamicSecret { // The name assigned to the secret. string name = 1; // This is the per-resource version information. string version_info = 2; // The timestamp when the secret was last updated. google.protobuf.Timestamp last_updated = 3; // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. google.protobuf.Any secret = 4; } // StaticSecret specifies statically loaded secret in bootstrap. message StaticSecret { // The name assigned to the secret. string name = 1; // The timestamp when the secret was last updated. google.protobuf.Timestamp last_updated = 2; // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. google.protobuf.Any secret = 3; } // The statically loaded secrets. repeated StaticSecret static_secrets = 1; // The dynamically loaded active secrets. These are secrets that are available to service // clusters or listeners. repeated DynamicSecret dynamic_active_secrets = 2; // The dynamically loaded warming secrets. 
These are secrets that are currently undergoing // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } ================================================ FILE: api/envoy/admin/v2alpha/listeners.proto ================================================ syntax = "proto3"; package envoy.admin.v2alpha; import "envoy/api/v2/core/address.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ListenersProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listeners] // Admin endpoint uses this wrapper for `/listeners` to display listener status information. // See :ref:`/listeners ` for more information. message Listeners { // List of listener statuses. repeated ListenerStatus listener_statuses = 1; } // Details an individual listener's current status. message ListenerStatus { // Name of the listener string name = 1; // The actual local address that the listener is listening on. If a listener was configured // to listen on port 0, then this address has the port that was allocated by the OS. api.v2.core.Address local_address = 2; } ================================================ FILE: api/envoy/admin/v2alpha/memory.proto ================================================ syntax = "proto3"; package envoy.admin.v2alpha; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "MemoryProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Memory] // Proto representation of the internal memory consumption of an Envoy instance. These represent // values extracted from an internal TCMalloc instance. 
For more information, see the section of the // docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). // [#next-free-field: 7] message Memory { // The number of bytes allocated by the heap for Envoy. This is an alias for // `generic.current_allocated_bytes`. uint64 allocated = 1; // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for // `generic.heap_size`. uint64 heap_size = 2; // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards // virtual memory usage, and depending on the OS, typically do not count towards physical memory // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. uint64 pageheap_unmapped = 3; // The number of bytes in free, mapped pages in the page heap. These bytes always count towards // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`. uint64 pageheap_free = 4; // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias // for `tcmalloc.current_total_thread_cache_bytes`. uint64 total_thread_cache = 5; // The number of bytes of the physical memory usage by the allocator. This is an alias for // `generic.total_physical_bytes`. uint64 total_physical_bytes = 6; } ================================================ FILE: api/envoy/admin/v2alpha/metrics.proto ================================================ syntax = "proto3"; package envoy.admin.v2alpha; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "MetricsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metrics] // Proto representation of an Envoy Counter or Gauge value. 
message SimpleMetric {
  enum Type {
    COUNTER = 0;
    GAUGE = 1;
  }

  // Type of the metric represented.
  Type type = 1;

  // Current metric value.
  uint64 value = 2;

  // Name of the metric.
  string name = 3;
}



================================================
FILE: api/envoy/admin/v2alpha/mutex_stats.proto
================================================
syntax = "proto3";

package envoy.admin.v2alpha;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.admin.v2alpha";
option java_outer_classname = "MutexStatsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: MutexStats]

// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run
// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex`
// [docs](https://abseil.io/about/design/mutex#extra-features).
//
// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not
// correspond to core clock frequency. For more information, see the `CycleClock`
// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).
message MutexStats {
  // The number of individual mutex contentions which have occurred since startup.
  uint64 num_contentions = 1;

  // The length of the current contention wait cycle.
  uint64 current_wait_cycles = 2;

  // The lifetime total of all contention wait cycles.
uint64 lifetime_wait_cycles = 3; } ================================================ FILE: api/envoy/admin/v2alpha/server_info.proto ================================================ syntax = "proto3"; package envoy.admin.v2alpha; import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "ServerInfoProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Server State] // Proto representation of the value returned by /server_info, containing // server version/server status information. // [#next-free-field: 7] message ServerInfo { enum State { // Server is live and serving traffic. LIVE = 0; // Server is draining listeners in response to external health checks failing. DRAINING = 1; // Server has not yet completed cluster manager initialization. PRE_INITIALIZING = 2; // Server is running the cluster manager initialization callbacks (e.g., RDS). INITIALIZING = 3; } // Server version. string version = 1; // State of the server. State state = 2; // Uptime since current epoch was started. google.protobuf.Duration uptime_current_epoch = 3; // Uptime since the start of the first epoch. google.protobuf.Duration uptime_all_epochs = 4; // Hot restart version. string hot_restart_version = 5; // Command line options the server is currently running with. CommandLineOptions command_line_options = 6; } // [#next-free-field: 29] message CommandLineOptions { enum IpVersion { v4 = 0; v6 = 1; } enum Mode { // Validate configs and then serve traffic normally. Serve = 0; // Validate configs and exit. Validate = 1; // Completely load and initialize the config, and then exit without running the listener loop. InitOnly = 2; } reserved 12; // See :option:`--base-id` for details. uint64 base_id = 1; // See :option:`--concurrency` for details. 
uint32 concurrency = 2; // See :option:`--config-path` for details. string config_path = 3; // See :option:`--config-yaml` for details. string config_yaml = 4; // See :option:`--allow-unknown-static-fields` for details. bool allow_unknown_static_fields = 5; // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; // See :option:`--admin-address-path` for details. string admin_address_path = 6; // See :option:`--local-address-ip-version` for details. IpVersion local_address_ip_version = 7; // See :option:`--log-level` for details. string log_level = 8; // See :option:`--component-log-level` for details. string component_log_level = 9; // See :option:`--log-format` for details. string log_format = 10; // See :option:`--log-format-escaped` for details. bool log_format_escaped = 27; // See :option:`--log-path` for details. string log_path = 11; // See :option:`--service-cluster` for details. string service_cluster = 13; // See :option:`--service-node` for details. string service_node = 14; // See :option:`--service-zone` for details. string service_zone = 15; // See :option:`--file-flush-interval-msec` for details. google.protobuf.Duration file_flush_interval = 16; // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; // See :option:`--mode` for details. Mode mode = 19; // max_stats and max_obj_name_len are now unused and have no effect. uint64 max_stats = 20 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; uint64 max_obj_name_len = 21 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // See :option:`--disable-hot-restart` for details. bool disable_hot_restart = 22; // See :option:`--enable-mutex-tracing` for details. bool enable_mutex_tracing = 23; // See :option:`--restart-epoch` for details. 
uint32 restart_epoch = 24; // See :option:`--cpuset-threads` for details. bool cpuset_threads = 25; // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; } ================================================ FILE: api/envoy/admin/v2alpha/tap.proto ================================================ syntax = "proto3"; package envoy.admin.v2alpha; import "envoy/service/tap/v2alpha/common.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.admin.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] // The /tap admin request body that is used to configure an active tap session. message TapRequest { // The opaque configuration ID used to match the configuration to a loaded extension. // A tap extension configures a similar opaque ID that is used to match. string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; // The tap configuration to load. service.tap.v2alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/admin/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/admin/v2alpha:pkg", "//envoy/annotations:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/tap/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/admin/v3/certs.proto ================================================ syntax = "proto3"; package envoy.admin.v3; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "CertsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Certificates] // Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to // display certificate information. See :ref:`/certs ` for more // information. message Certificates { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Certificates"; // List of certificates known to an Envoy. repeated Certificate certificates = 1; } message Certificate { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Certificate"; // Details of CA certificate. repeated CertificateDetails ca_cert = 1; // Details of Certificate Chain repeated CertificateDetails cert_chain = 2; } // [#next-free-field: 8] message CertificateDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CertificateDetails"; message OcspDetails { // Indicates the time from which the OCSP response is valid. google.protobuf.Timestamp valid_from = 1; // Indicates the time at which the OCSP response expires. google.protobuf.Timestamp expiration = 2; } // Path of the certificate. 
string path = 1; // Certificate Serial Number. string serial_number = 2; // List of Subject Alternate names. repeated SubjectAlternateName subject_alt_names = 3; // Minimum of days until expiration of certificate and its chain. uint64 days_until_expiration = 4; // Indicates the time from which the certificate is valid. google.protobuf.Timestamp valid_from = 5; // Indicates the time at which the certificate expires. google.protobuf.Timestamp expiration_time = 6; // Details related to the OCSP response associated with this certificate, if any. OcspDetails ocsp_details = 7; } message SubjectAlternateName { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.SubjectAlternateName"; // Subject Alternate Name. oneof name { string dns = 1; string uri = 2; string ip_address = 3; } } ================================================ FILE: api/envoy/admin/v3/clusters.proto ================================================ syntax = "proto3"; package envoy.admin.v3; import "envoy/admin/v3/metrics.proto"; import "envoy/config/cluster/v3/circuit_breaker.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/health_check.proto"; import "envoy/type/v3/percent.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ClustersProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Clusters] // Admin endpoint uses this wrapper for `/clusters` to display cluster status information. // See :ref:`/clusters ` for more information. message Clusters { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Clusters"; // Mapping from cluster name to each cluster's status. repeated ClusterStatus cluster_statuses = 1; } // Details an individual cluster's current status. 
// [#next-free-field: 7] message ClusterStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClusterStatus"; // Name of the cluster. string name = 1; // Denotes whether this cluster was added via API or configured statically. bool added_via_api = 2; // The success rate threshold used in the last interval. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all errors: externally and locally generated were used to calculate the threshold. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*, only externally generated errors were used to calculate the threshold. // The threshold is used to eject hosts based on their success rate. See // :ref:`Cluster outlier detection ` documentation for details. // // Note: this field may be omitted in any of the three following cases: // // 1. There were not enough hosts with enough request volume to proceed with success rate based // outlier ejection. // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.v3.Percent success_rate_ejection_threshold = 3; // Mapping from host address to the host's current status. repeated HostStatus host_statuses = 4; // The success rate threshold used in the last interval when only locally originated failures were // taken into account and externally originated errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*. The threshold is used to eject hosts based on their success rate. // See :ref:`Cluster outlier detection ` documentation for // details. // // Note: this field may be omitted in any of the three following cases: // // 1. There were not enough hosts with enough request volume to proceed with success rate based // outlier ejection. // 2. 
The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.v3.Percent local_origin_success_rate_ejection_threshold = 5; // :ref:`Circuit breaking ` settings of the cluster. config.cluster.v3.CircuitBreakers circuit_breakers = 6; } // Current state of a particular host. // [#next-free-field: 10] message HostStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.HostStatus"; // Address of this host. config.core.v3.Address address = 1; // List of stats specific to this host. repeated SimpleMetric stats = 2; // The host's current health status. HostHealthStatus health_status = 3; // Request success rate for this host over the last calculated interval. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all errors: externally and locally generated were used in success rate // calculation. If // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*, only externally generated errors were used in success rate calculation. // See :ref:`Cluster outlier detection ` documentation for // details. // // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. type.v3.Percent success_rate = 4; // The host's weight. If not configured, the value defaults to 1. uint32 weight = 5; // The hostname of the host, if applicable. string hostname = 6; // The host's priority. If not configured, the value defaults to 0 (highest priority). uint32 priority = 7; // Request success rate for this host over the last calculated // interval when only locally originated errors are taken into account and externally originated // errors were treated as success. 
// This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*. // See :ref:`Cluster outlier detection ` documentation for // details. // // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. type.v3.Percent local_origin_success_rate = 8; // locality of the host. config.core.v3.Locality locality = 9; } // Health status for a host. // [#next-free-field: 7] message HostHealthStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.HostHealthStatus"; // The host is currently failing active health checks. bool failed_active_health_check = 1; // The host is currently considered an outlier and has been ejected. bool failed_outlier_check = 2; // The host is currently being marked as degraded through active health checking. bool failed_active_degraded_check = 4; // The host has been removed from service discovery, but is being stabilized due to active // health checking. bool pending_dynamic_removal = 5; // The host has not yet been health checked. bool pending_active_hc = 6; // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported // here. // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] 
config.core.v3.HealthStatus eds_health_status = 3; } ================================================ FILE: api/envoy/admin/v3/config_dump.proto ================================================ syntax = "proto3"; package envoy.admin.v3; import "envoy/config/bootstrap/v3/bootstrap.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ConfigDumpProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: ConfigDump] // The :ref:`/config_dump ` admin endpoint uses this wrapper // message to maintain and serve arbitrary configuration information from any component in Envoy. message ConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ConfigDump"; // This list is serialized and dumped in its entirety at the // :ref:`/config_dump ` endpoint. // // The following configurations are currently supported and will be dumped in the order given // below: // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` // // EDS Configuration will only be dumped by using parameter `?include_eds` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, // :ref:`/config_dump?mask={} `, // or :ref:`/config_dump?resource={},mask={} // ` for more information. repeated google.protobuf.Any configs = 1; } message UpdateFailureState { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.UpdateFailureState"; // What the component configuration would have been if the update had succeeded. 
google.protobuf.Any failed_configuration = 1; // Time of the latest failed update attempt. google.protobuf.Timestamp last_update_attempt = 2; // Details about the last failed update attempt. string details = 3; } // This message describes the bootstrap configuration that Envoy was started with. This includes // any CLI overrides that were merged. Bootstrap configuration information can be used to recreate // the static portions of an Envoy configuration by reusing the output as the bootstrap // configuration for another Envoy. message BootstrapConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.BootstrapConfigDump"; config.bootstrap.v3.Bootstrap bootstrap = 1; // The timestamp when the BootstrapConfig was last updated. google.protobuf.Timestamp last_updated = 2; } // Envoy's listener manager fills this message with all currently known listeners. Listener // configuration information can be used to recreate an Envoy configuration by populating all // listeners as static listeners or by returning them in a LDS response. message ListenersConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ListenersConfigDump"; // Describes a statically loaded listener. message StaticListener { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ListenersConfigDump.StaticListener"; // The listener config. google.protobuf.Any listener = 1; // The timestamp when the Listener was last successfully updated. google.protobuf.Timestamp last_updated = 2; } message DynamicListenerState { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ListenersConfigDump.DynamicListenerState"; // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time // that the listener was loaded. In the future, discrete per-listener versions may be supported // by the API. 
string version_info = 1; // The listener config. google.protobuf.Any listener = 2; // The timestamp when the Listener was last successfully updated. google.protobuf.Timestamp last_updated = 3; } // Describes a dynamically loaded listener via the LDS API. // [#next-free-field: 6] message DynamicListener { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ListenersConfigDump.DynamicListener"; // The name or unique id of this listener, pulled from the DynamicListenerState config. string name = 1; // The listener state for any active listener by this name. // These are listeners that are available to service data plane traffic. DynamicListenerState active_state = 2; // The listener state for any warming listener by this name. // These are listeners that are currently undergoing warming in preparation to service data // plane traffic. Note that if attempting to recreate an Envoy configuration from a // configuration dump, the warming listeners should generally be discarded. DynamicListenerState warming_state = 3; // The listener state for any draining listener by this name. // These are listeners that are currently undergoing draining in preparation to stop servicing // data plane traffic. Note that if attempting to recreate an Envoy configuration from a // configuration dump, the draining listeners should generally be discarded. DynamicListenerState draining_state = 4; // Set if the last update failed, cleared after the next successful update. UpdateFailureState error_state = 5; } // This is the :ref:`version_info ` in the // last processed LDS discovery response. If there are only static bootstrap listeners, this field // will be "". string version_info = 1; // The statically loaded listener configs. repeated StaticListener static_listeners = 2; // State for any warming, active, or draining listeners. repeated DynamicListener dynamic_listeners = 3; } // Envoy's cluster manager fills this message with all currently known clusters. 
Cluster // configuration information can be used to recreate an Envoy configuration by populating all // clusters as static clusters or by returning them in a CDS response. message ClustersConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClustersConfigDump"; // Describes a statically loaded cluster. message StaticCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClustersConfigDump.StaticCluster"; // The cluster config. google.protobuf.Any cluster = 1; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 2; } // Describes a dynamically loaded cluster via the CDS API. message DynamicCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ClustersConfigDump.DynamicCluster"; // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by // the API. string version_info = 1; // The cluster config. google.protobuf.Any cluster = 2; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 3; } // This is the :ref:`version_info ` in the // last processed CDS discovery response. If there are only static bootstrap clusters, this field // will be "". string version_info = 1; // The statically loaded cluster configs. repeated StaticCluster static_clusters = 2; // The dynamically loaded active clusters. These are clusters that are available to service // data plane traffic. repeated DynamicCluster dynamic_active_clusters = 3; // The dynamically loaded warming clusters. These are clusters that are currently undergoing // warming in preparation to service data plane traffic. Note that if attempting to recreate an // Envoy configuration from a configuration dump, the warming clusters should generally be // discarded. 
repeated DynamicCluster dynamic_warming_clusters = 4; } // Envoy's RDS implementation fills this message with all currently loaded routes, as described by // their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration // or defined inline while configuring listeners are separated from those configured dynamically via RDS. // Route configuration information can be used to recreate an Envoy configuration by populating all routes // as static routes or by returning them in RDS responses. message RoutesConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.RoutesConfigDump"; message StaticRouteConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.RoutesConfigDump.StaticRouteConfig"; // The route config. google.protobuf.Any route_config = 1; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 2; } message DynamicRouteConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.RoutesConfigDump.DynamicRouteConfig"; // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the route configuration was loaded. string version_info = 1; // The route config. google.protobuf.Any route_config = 2; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 3; } // The statically loaded route configs. repeated StaticRouteConfig static_route_configs = 2; // The dynamically loaded route configs. repeated DynamicRouteConfig dynamic_route_configs = 3; } // Envoy's scoped RDS implementation fills this message with all currently loaded route // configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both // the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the // dynamically obtained scopes via the SRDS API. 
message ScopedRoutesConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ScopedRoutesConfigDump"; message InlineScopedRouteConfigs { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ScopedRoutesConfigDump.InlineScopedRouteConfigs"; // The name assigned to the scoped route configurations. string name = 1; // The scoped route configurations. repeated google.protobuf.Any scoped_route_configs = 2; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 3; } message DynamicScopedRouteConfigs { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ScopedRoutesConfigDump.DynamicScopedRouteConfigs"; // The name assigned to the scoped route configurations. string name = 1; // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the scoped routes configuration was loaded. string version_info = 2; // The scoped route configurations. repeated google.protobuf.Any scoped_route_configs = 3; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 4; } // The statically loaded scoped route configs. repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; // The dynamically loaded scoped route configs. repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; } // Envoy's SDS implementation fills this message with all secrets fetched dynamically via SDS. message SecretsConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.SecretsConfigDump"; // DynamicSecret contains secret information fetched via SDS. message DynamicSecret { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.SecretsConfigDump.DynamicSecret"; // The name assigned to the secret. 
string name = 1; // This is the per-resource version information. string version_info = 2; // The timestamp when the secret was last updated. google.protobuf.Timestamp last_updated = 3; // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. google.protobuf.Any secret = 4; } // StaticSecret specifies statically loaded secret in bootstrap. message StaticSecret { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.SecretsConfigDump.StaticSecret"; // The name assigned to the secret. string name = 1; // The timestamp when the secret was last updated. google.protobuf.Timestamp last_updated = 2; // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. google.protobuf.Any secret = 3; } // The statically loaded secrets. repeated StaticSecret static_secrets = 1; // The dynamically loaded active secrets. These are secrets that are available to service // clusters or listeners. repeated DynamicSecret dynamic_active_secrets = 2; // The dynamically loaded warming secrets. These are secrets that are currently undergoing // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } // Envoy's admin fills this message with all currently known endpoints. Endpoint // configuration information can be used to recreate an Envoy configuration by populating all // endpoints as static endpoints or by returning them in an EDS response. message EndpointsConfigDump { message StaticEndpointConfig { // The endpoint config. google.protobuf.Any endpoint_config = 1; // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 2; } message DynamicEndpointConfig { // [#not-implemented-hide:] This is the per-resource version information. 
This version is currently taken from the // :ref:`version_info ` field at the time that // the endpoint configuration was loaded. string version_info = 1; // The endpoint config. google.protobuf.Any endpoint_config = 2; // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 3; } // The statically loaded endpoint configs. repeated StaticEndpointConfig static_endpoint_configs = 2; // The dynamically loaded endpoint configs. repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; } ================================================ FILE: api/envoy/admin/v3/init_dump.proto ================================================ syntax = "proto3"; package envoy.admin.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "InitDumpProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: InitDump] // Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers, // which provides the information of their unready targets. // The :ref:`/init_dump ` will dump all unready targets information. message UnreadyTargetsDumps { // Message of unready targets information of an init manager. message UnreadyTargetsDump { // Name of the init manager. Example: "init_manager_xxx". string name = 1; // Names of unready targets of the init manager. Example: "target_xxx". repeated string target_names = 2; } // You can choose specific component to dump unready targets with mask query parameter. // See :ref:`/init_dump?mask={} ` for more information. // The dumps of unready targets of all init managers. 
repeated UnreadyTargetsDump unready_targets_dumps = 1;
}

================================================ FILE: api/envoy/admin/v3/listeners.proto ================================================

syntax = "proto3";

package envoy.admin.v3;

import "envoy/config/core/v3/address.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.admin.v3";
option java_outer_classname = "ListenersProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Listeners]

// Admin endpoint uses this wrapper for `/listeners` to display listener status information.
// See :ref:`/listeners ` for more information.
message Listeners {
  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Listeners";

  // List of listener statuses.
  repeated ListenerStatus listener_statuses = 1;
}

// Details an individual listener's current status.
message ListenerStatus {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.admin.v2alpha.ListenerStatus";

  // Name of the listener.
  string name = 1;

  // The actual local address that the listener is listening on. If a listener was configured
  // to listen on port 0, then this address has the port that was allocated by the OS.
  config.core.v3.Address local_address = 2;
}

================================================ FILE: api/envoy/admin/v3/memory.proto ================================================

syntax = "proto3";

package envoy.admin.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.admin.v3";
option java_outer_classname = "MemoryProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Memory]

// Proto representation of the internal memory consumption of an Envoy instance.
These represent // values extracted from an internal TCMalloc instance. For more information, see the section of the // docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). // [#next-free-field: 7] message Memory { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.Memory"; // The number of bytes allocated by the heap for Envoy. This is an alias for // `generic.current_allocated_bytes`. uint64 allocated = 1; // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for // `generic.heap_size`. uint64 heap_size = 2; // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards // virtual memory usage, and depending on the OS, typically do not count towards physical memory // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. uint64 pageheap_unmapped = 3; // The number of bytes in free, mapped pages in the page heap. These bytes always count towards // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`. uint64 pageheap_free = 4; // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias // for `tcmalloc.current_total_thread_cache_bytes`. uint64 total_thread_cache = 5; // The number of bytes of the physical memory usage by the allocator. This is an alias for // `generic.total_physical_bytes`. 
uint64 total_physical_bytes = 6;
}

================================================ FILE: api/envoy/admin/v3/metrics.proto ================================================

syntax = "proto3";

package envoy.admin.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.admin.v3";
option java_outer_classname = "MetricsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Metrics]

// Proto representation of an Envoy Counter or Gauge value.
message SimpleMetric {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.admin.v2alpha.SimpleMetric";

  // The kind of metric carried by this message.
  enum Type {
    // A monotonically increasing value.
    COUNTER = 0;

    // A value that may increase or decrease over time.
    GAUGE = 1;
  }

  // Type of the metric represented.
  Type type = 1;

  // Current metric value.
  uint64 value = 2;

  // Name of the metric.
  string name = 3;
}

================================================ FILE: api/envoy/admin/v3/mutex_stats.proto ================================================

syntax = "proto3";

package envoy.admin.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.admin.v3";
option java_outer_classname = "MutexStatsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: MutexStats]

// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run
// under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex`
// [docs](https://abseil.io/about/design/mutex#extra-features).
//
// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not
// correspond to core clock frequency. For more information, see the `CycleClock`
// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).
message MutexStats { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.MutexStats"; // The number of individual mutex contentions which have occurred since startup. uint64 num_contentions = 1; // The length of the current contention wait cycle. uint64 current_wait_cycles = 2; // The lifetime total of all contention wait cycles. uint64 lifetime_wait_cycles = 3; } ================================================ FILE: api/envoy/admin/v3/server_info.proto ================================================ syntax = "proto3"; package envoy.admin.v3; import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "ServerInfoProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Server State] // Proto representation of the value returned by /server_info, containing // server version/server status information. // [#next-free-field: 8] message ServerInfo { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.ServerInfo"; enum State { // Server is live and serving traffic. LIVE = 0; // Server is draining listeners in response to external health checks failing. DRAINING = 1; // Server has not yet completed cluster manager initialization. PRE_INITIALIZING = 2; // Server is running the cluster manager initialization callbacks (e.g., RDS). INITIALIZING = 3; } // Server version. string version = 1; // State of the server. State state = 2; // Uptime since current epoch was started. google.protobuf.Duration uptime_current_epoch = 3; // Uptime since the start of the first epoch. google.protobuf.Duration uptime_all_epochs = 4; // Hot restart version. 
string hot_restart_version = 5; // Command line options the server is currently running with. CommandLineOptions command_line_options = 6; // Populated node identity of this server. config.core.v3.Node node = 7; } // [#next-free-field: 37] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.CommandLineOptions"; enum IpVersion { v4 = 0; v6 = 1; } enum Mode { // Validate configs and then serve traffic normally. Serve = 0; // Validate configs and exit. Validate = 1; // Completely load and initialize the config, and then exit without running the listener loop. InitOnly = 2; } enum DrainStrategy { // Gradually discourage connections over the course of the drain period. Gradual = 0; // Discourage all connections for the duration of the drain sequence. Immediate = 1; } reserved 12, 20, 21; reserved "max_stats", "max_obj_name_len"; // See :option:`--base-id` for details. uint64 base_id = 1; // See :option:`--use-dynamic-base-id` for details. bool use_dynamic_base_id = 31; // See :option:`--base-id-path` for details. string base_id_path = 32; // See :option:`--concurrency` for details. uint32 concurrency = 2; // See :option:`--config-path` for details. string config_path = 3; // See :option:`--config-yaml` for details. string config_yaml = 4; // See :option:`--allow-unknown-static-fields` for details. bool allow_unknown_static_fields = 5; // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; // See :option:`--ignore-unknown-dynamic-fields` for details. bool ignore_unknown_dynamic_fields = 30; // See :option:`--admin-address-path` for details. string admin_address_path = 6; // See :option:`--local-address-ip-version` for details. IpVersion local_address_ip_version = 7; // See :option:`--log-level` for details. string log_level = 8; // See :option:`--component-log-level` for details. string component_log_level = 9; // See :option:`--log-format` for details. 
string log_format = 10; // See :option:`--log-format-escaped` for details. bool log_format_escaped = 27; // See :option:`--log-path` for details. string log_path = 11; // See :option:`--service-cluster` for details. string service_cluster = 13; // See :option:`--service-node` for details. string service_node = 14; // See :option:`--service-zone` for details. string service_zone = 15; // See :option:`--file-flush-interval-msec` for details. google.protobuf.Duration file_flush_interval = 16; // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; // See :option:`--drain-strategy` for details. DrainStrategy drain_strategy = 33; // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; // See :option:`--mode` for details. Mode mode = 19; // See :option:`--disable-hot-restart` for details. bool disable_hot_restart = 22; // See :option:`--enable-mutex-tracing` for details. bool enable_mutex_tracing = 23; // See :option:`--restart-epoch` for details. uint32 restart_epoch = 24; // See :option:`--cpuset-threads` for details. bool cpuset_threads = 25; // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; // See :option:`--bootstrap-version` for details. uint32 bootstrap_version = 29; // See :option:`--enable-fine-grain-logging` for details. bool enable_fine_grain_logging = 34; // See :option:`--socket-path` for details. string socket_path = 35; // See :option:`--socket-mode` for details. 
uint32 socket_mode = 36; } ================================================ FILE: api/envoy/admin/v3/tap.proto ================================================ syntax = "proto3"; package envoy.admin.v3; import "envoy/config/tap/v3/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.admin.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap] // The /tap admin request body that is used to configure an active tap session. message TapRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v2alpha.TapRequest"; // The opaque configuration ID used to match the configuration to a loaded extension. // A tap extension configures a similar opaque ID that is used to match. string config_id = 1 [(validate.rules).string = {min_len: 1}]; // The tap configuration to load. config.tap.v3.TapConfig tap_config = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/admin/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/admin/v3:pkg", "//envoy/annotations:pkg", "//envoy/config/bootstrap/v4alpha:pkg", "//envoy/config/cluster/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/tap/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/admin/v4alpha/certs.proto ================================================ syntax = "proto3"; package envoy.admin.v4alpha; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v4alpha"; option java_outer_classname = "CertsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Certificates] // Proto representation of certificate details. Admin endpoint uses this wrapper for `/certs` to // display certificate information. See :ref:`/certs ` for more // information. message Certificates { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificates"; // List of certificates known to an Envoy. repeated Certificate certificates = 1; } message Certificate { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Certificate"; // Details of CA certificate. repeated CertificateDetails ca_cert = 1; // Details of Certificate Chain repeated CertificateDetails cert_chain = 2; } // [#next-free-field: 8] message CertificateDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails"; message OcspDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CertificateDetails.OcspDetails"; // Indicates the time from which the OCSP response is valid. 
google.protobuf.Timestamp valid_from = 1; // Indicates the time at which the OCSP response expires. google.protobuf.Timestamp expiration = 2; } // Path of the certificate. string path = 1; // Certificate Serial Number. string serial_number = 2; // List of Subject Alternate names. repeated SubjectAlternateName subject_alt_names = 3; // Minimum of days until expiration of certificate and it's chain. uint64 days_until_expiration = 4; // Indicates the time from which the certificate is valid. google.protobuf.Timestamp valid_from = 5; // Indicates the time at which the certificate expires. google.protobuf.Timestamp expiration_time = 6; // Details related to the OCSP response associated with this certificate, if any. OcspDetails ocsp_details = 7; } message SubjectAlternateName { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SubjectAlternateName"; // Subject Alternate Name. oneof name { string dns = 1; string uri = 2; string ip_address = 3; } } ================================================ FILE: api/envoy/admin/v4alpha/clusters.proto ================================================ syntax = "proto3"; package envoy.admin.v4alpha; import "envoy/admin/v4alpha/metrics.proto"; import "envoy/config/cluster/v4alpha/circuit_breaker.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/health_check.proto"; import "envoy/type/v3/percent.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v4alpha"; option java_outer_classname = "ClustersProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Clusters] // Admin endpoint uses this wrapper for `/clusters` to display cluster status information. // See :ref:`/clusters ` for more information. 
message Clusters { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Clusters"; // Mapping from cluster name to each cluster's status. repeated ClusterStatus cluster_statuses = 1; } // Details an individual cluster's current status. // [#next-free-field: 7] message ClusterStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClusterStatus"; // Name of the cluster. string name = 1; // Denotes whether this cluster was added via API or configured statically. bool added_via_api = 2; // The success rate threshold used in the last interval. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all errors: externally and locally generated were used to calculate the threshold. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*, only externally generated errors were used to calculate the threshold. // The threshold is used to eject hosts based on their success rate. See // :ref:`Cluster outlier detection ` documentation for details. // // Note: this field may be omitted in any of the three following cases: // // 1. There were not enough hosts with enough request volume to proceed with success rate based // outlier ejection. // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.v3.Percent success_rate_ejection_threshold = 3; // Mapping from host address to the host's current status. repeated HostStatus host_statuses = 4; // The success rate threshold used in the last interval when only locally originated failures were // taken into account and externally originated errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*. The threshold is used to eject hosts based on their success rate. 
// See :ref:`Cluster outlier detection ` documentation for // details. // // Note: this field may be omitted in any of the three following cases: // // 1. There were not enough hosts with enough request volume to proceed with success rate based // outlier ejection. // 2. The threshold is computed to be < 0 because a negative value implies that there was no // threshold for that interval. // 3. Outlier detection is not enabled for this cluster. type.v3.Percent local_origin_success_rate_ejection_threshold = 5; // :ref:`Circuit breaking ` settings of the cluster. config.cluster.v4alpha.CircuitBreakers circuit_breakers = 6; } // Current state of a particular host. // [#next-free-field: 10] message HostStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostStatus"; // Address of this host. config.core.v4alpha.Address address = 1; // List of stats specific to this host. repeated SimpleMetric stats = 2; // The host's current health status. HostHealthStatus health_status = 3; // Request success rate for this host over the last calculated interval. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all errors: externally and locally generated were used in success rate // calculation. If // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*, only externally generated errors were used in success rate calculation. // See :ref:`Cluster outlier detection ` documentation for // details. // // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. type.v3.Percent success_rate = 4; // The host's weight. If not configured, the value defaults to 1. uint32 weight = 5; // The hostname of the host, if applicable. string hostname = 6; // The host's priority. If not configured, the value defaults to 0 (highest priority). 
uint32 priority = 7; // Request success rate for this host over the last calculated // interval when only locally originated errors are taken into account and externally originated // errors were treated as success. // This field should be interpreted only when // :ref:`outlier_detection.split_external_local_origin_errors` // is *true*. // See :ref:`Cluster outlier detection ` documentation for // details. // // Note: the message will not be present if host did not have enough request volume to calculate // success rate or the cluster did not have enough hosts to run through success rate outlier // ejection. type.v3.Percent local_origin_success_rate = 8; // locality of the host. config.core.v4alpha.Locality locality = 9; } // Health status for a host. // [#next-free-field: 7] message HostHealthStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.HostHealthStatus"; // The host is currently failing active health checks. bool failed_active_health_check = 1; // The host is currently considered an outlier and has been ejected. bool failed_outlier_check = 2; // The host is currently being marked as degraded through active health checking. bool failed_active_degraded_check = 4; // The host has been removed from service discovery, but is being stabilized due to active // health checking. bool pending_dynamic_removal = 5; // The host has not yet been health checked. bool pending_active_hc = 6; // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported // here. // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.] 
config.core.v4alpha.HealthStatus eds_health_status = 3; } ================================================ FILE: api/envoy/admin/v4alpha/config_dump.proto ================================================ syntax = "proto3"; package envoy.admin.v4alpha; import "envoy/config/bootstrap/v4alpha/bootstrap.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v4alpha"; option java_outer_classname = "ConfigDumpProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: ConfigDump] // The :ref:`/config_dump ` admin endpoint uses this wrapper // message to maintain and serve arbitrary configuration information from any component in Envoy. message ConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ConfigDump"; // This list is serialized and dumped in its entirety at the // :ref:`/config_dump ` endpoint. // // The following configurations are currently supported and will be dumped in the order given // below: // // * *bootstrap*: :ref:`BootstrapConfigDump ` // * *clusters*: :ref:`ClustersConfigDump ` // * *endpoints*: :ref:`EndpointsConfigDump ` // * *listeners*: :ref:`ListenersConfigDump ` // * *routes*: :ref:`RoutesConfigDump ` // // EDS Configuration will only be dumped by using parameter `?include_eds` // // You can filter output with the resource and mask query parameters. // See :ref:`/config_dump?resource={} `, // :ref:`/config_dump?mask={} `, // or :ref:`/config_dump?resource={},mask={} // ` for more information. repeated google.protobuf.Any configs = 1; } message UpdateFailureState { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UpdateFailureState"; // What the component configuration would have been if the update had succeeded. 
google.protobuf.Any failed_configuration = 1; // Time of the latest failed update attempt. google.protobuf.Timestamp last_update_attempt = 2; // Details about the last failed update attempt. string details = 3; } // This message describes the bootstrap configuration that Envoy was started with. This includes // any CLI overrides that were merged. Bootstrap configuration information can be used to recreate // the static portions of an Envoy configuration by reusing the output as the bootstrap // configuration for another Envoy. message BootstrapConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.BootstrapConfigDump"; config.bootstrap.v4alpha.Bootstrap bootstrap = 1; // The timestamp when the BootstrapConfig was last updated. google.protobuf.Timestamp last_updated = 2; } // Envoy's listener manager fills this message with all currently known listeners. Listener // configuration information can be used to recreate an Envoy configuration by populating all // listeners as static listeners or by returning them in a LDS response. message ListenersConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenersConfigDump"; // Describes a statically loaded listener. message StaticListener { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenersConfigDump.StaticListener"; // The listener config. google.protobuf.Any listener = 1; // The timestamp when the Listener was last successfully updated. google.protobuf.Timestamp last_updated = 2; } message DynamicListenerState { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenersConfigDump.DynamicListenerState"; // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time // that the listener was loaded. In the future, discrete per-listener versions may be supported // by the API. 
string version_info = 1; // The listener config. google.protobuf.Any listener = 2; // The timestamp when the Listener was last successfully updated. google.protobuf.Timestamp last_updated = 3; } // Describes a dynamically loaded listener via the LDS API. // [#next-free-field: 6] message DynamicListener { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenersConfigDump.DynamicListener"; // The name or unique id of this listener, pulled from the DynamicListenerState config. string name = 1; // The listener state for any active listener by this name. // These are listeners that are available to service data plane traffic. DynamicListenerState active_state = 2; // The listener state for any warming listener by this name. // These are listeners that are currently undergoing warming in preparation to service data // plane traffic. Note that if attempting to recreate an Envoy configuration from a // configuration dump, the warming listeners should generally be discarded. DynamicListenerState warming_state = 3; // The listener state for any draining listener by this name. // These are listeners that are currently undergoing draining in preparation to stop servicing // data plane traffic. Note that if attempting to recreate an Envoy configuration from a // configuration dump, the draining listeners should generally be discarded. DynamicListenerState draining_state = 4; // Set if the last update failed, cleared after the next successful update. UpdateFailureState error_state = 5; } // This is the :ref:`version_info ` in the // last processed LDS discovery response. If there are only static bootstrap listeners, this field // will be "". string version_info = 1; // The statically loaded listener configs. repeated StaticListener static_listeners = 2; // State for any warming, active, or draining listeners. repeated DynamicListener dynamic_listeners = 3; } // Envoy's cluster manager fills this message with all currently known clusters. 
Cluster // configuration information can be used to recreate an Envoy configuration by populating all // clusters as static clusters or by returning them in a CDS response. message ClustersConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClustersConfigDump"; // Describes a statically loaded cluster. message StaticCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClustersConfigDump.StaticCluster"; // The cluster config. google.protobuf.Any cluster = 1; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 2; } // Describes a dynamically loaded cluster via the CDS API. message DynamicCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ClustersConfigDump.DynamicCluster"; // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by // the API. string version_info = 1; // The cluster config. google.protobuf.Any cluster = 2; // The timestamp when the Cluster was last updated. google.protobuf.Timestamp last_updated = 3; } // This is the :ref:`version_info ` in the // last processed CDS discovery response. If there are only static bootstrap clusters, this field // will be "". string version_info = 1; // The statically loaded cluster configs. repeated StaticCluster static_clusters = 2; // The dynamically loaded active clusters. These are clusters that are available to service // data plane traffic. repeated DynamicCluster dynamic_active_clusters = 3; // The dynamically loaded warming clusters. These are clusters that are currently undergoing // warming in preparation to service data plane traffic. Note that if attempting to recreate an // Envoy configuration from a configuration dump, the warming clusters should generally be // discarded. 
repeated DynamicCluster dynamic_warming_clusters = 4; } // Envoy's RDS implementation fills this message with all currently loaded routes, as described by // their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration // or defined inline while configuring listeners are separated from those configured dynamically via RDS. // Route configuration information can be used to recreate an Envoy configuration by populating all routes // as static routes or by returning them in RDS responses. message RoutesConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.RoutesConfigDump"; message StaticRouteConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.RoutesConfigDump.StaticRouteConfig"; // The route config. google.protobuf.Any route_config = 1; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 2; } message DynamicRouteConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig"; // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the route configuration was loaded. string version_info = 1; // The route config. google.protobuf.Any route_config = 2; // The timestamp when the Route was last updated. google.protobuf.Timestamp last_updated = 3; } // The statically loaded route configs. repeated StaticRouteConfig static_route_configs = 2; // The dynamically loaded route configs. repeated DynamicRouteConfig dynamic_route_configs = 3; } // Envoy's scoped RDS implementation fills this message with all currently loaded route // configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both // the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the // dynamically obtained scopes via the SRDS API. 
message ScopedRoutesConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ScopedRoutesConfigDump"; message InlineScopedRouteConfigs { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs"; // The name assigned to the scoped route configurations. string name = 1; // The scoped route configurations. repeated google.protobuf.Any scoped_route_configs = 2; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 3; } message DynamicScopedRouteConfigs { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs"; // The name assigned to the scoped route configurations. string name = 1; // This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the scoped routes configuration was loaded. string version_info = 2; // The scoped route configurations. repeated google.protobuf.Any scoped_route_configs = 3; // The timestamp when the scoped route config set was last updated. google.protobuf.Timestamp last_updated = 4; } // The statically loaded scoped route configs. repeated InlineScopedRouteConfigs inline_scoped_route_configs = 1; // The dynamically loaded scoped route configs. repeated DynamicScopedRouteConfigs dynamic_scoped_route_configs = 2; } // Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS. message SecretsConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SecretsConfigDump"; // DynamicSecret contains secret information fetched via SDS. message DynamicSecret { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SecretsConfigDump.DynamicSecret"; // The name assigned to the secret. string name = 1; // This is the per-resource version information. 
string version_info = 2; // The timestamp when the secret was last updated. google.protobuf.Timestamp last_updated = 3; // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. google.protobuf.Any secret = 4; } // StaticSecret specifies statically loaded secret in bootstrap. message StaticSecret { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SecretsConfigDump.StaticSecret"; // The name assigned to the secret. string name = 1; // The timestamp when the secret was last updated. google.protobuf.Timestamp last_updated = 2; // The actual secret information. // Security sensitive information is redacted (replaced with "[redacted]") for // private keys and passwords in TLS certificates. google.protobuf.Any secret = 3; } // The statically loaded secrets. repeated StaticSecret static_secrets = 1; // The dynamically loaded active secrets. These are secrets that are available to service // clusters or listeners. repeated DynamicSecret dynamic_active_secrets = 2; // The dynamically loaded warming secrets. These are secrets that are currently undergoing // warming in preparation to service clusters or listeners. repeated DynamicSecret dynamic_warming_secrets = 3; } // Envoy's admin fill this message with all currently known endpoints. Endpoint // configuration information can be used to recreate an Envoy configuration by populating all // endpoints as static endpoints or by returning them in an EDS response. message EndpointsConfigDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump"; message StaticEndpointConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig"; // The endpoint config. google.protobuf.Any endpoint_config = 1; // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. 
google.protobuf.Timestamp last_updated = 2; } message DynamicEndpointConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig"; // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the // :ref:`version_info ` field at the time that // the endpoint configuration was loaded. string version_info = 1; // The endpoint config. google.protobuf.Any endpoint_config = 2; // [#not-implemented-hide:] The timestamp when the Endpoint was last updated. google.protobuf.Timestamp last_updated = 3; } // The statically loaded endpoint configs. repeated StaticEndpointConfig static_endpoint_configs = 2; // The dynamically loaded endpoint configs. repeated DynamicEndpointConfig dynamic_endpoint_configs = 3; } ================================================ FILE: api/envoy/admin/v4alpha/init_dump.proto ================================================ syntax = "proto3"; package envoy.admin.v4alpha; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v4alpha"; option java_outer_classname = "InitDumpProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: InitDump] // Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers, // which provides the information of their unready targets. // The :ref:`/init_dump ` will dump all unready targets information. message UnreadyTargetsDumps { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UnreadyTargetsDumps"; // Message of unready targets information of an init manager. message UnreadyTargetsDump { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump"; // Name of the init manager. Example: "init_manager_xxx". 
string name = 1; // Names of unready targets of the init manager. Example: "target_xxx". repeated string target_names = 2; } // You can choose specific component to dump unready targets with mask query parameter. // See :ref:`/init_dump?mask={} ` for more information. // The dumps of unready targets of all init managers. repeated UnreadyTargetsDump unready_targets_dumps = 1; } ================================================ FILE: api/envoy/admin/v4alpha/listeners.proto ================================================ syntax = "proto3"; package envoy.admin.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v4alpha"; option java_outer_classname = "ListenersProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Listeners] // Admin endpoint uses this wrapper for `/listeners` to display listener status information. // See :ref:`/listeners ` for more information. message Listeners { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Listeners"; // List of listener statuses. repeated ListenerStatus listener_statuses = 1; } // Details an individual listener's current status. message ListenerStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ListenerStatus"; // Name of the listener string name = 1; // The actual local address that the listener is listening on. If a listener was configured // to listen on port 0, then this address has the port that was allocated by the OS. 
config.core.v4alpha.Address local_address = 2; } ================================================ FILE: api/envoy/admin/v4alpha/memory.proto ================================================ syntax = "proto3"; package envoy.admin.v4alpha; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v4alpha"; option java_outer_classname = "MemoryProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Memory] // Proto representation of the internal memory consumption of an Envoy instance. These represent // values extracted from an internal TCMalloc instance. For more information, see the section of the // docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html). // [#next-free-field: 7] message Memory { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.Memory"; // The number of bytes allocated by the heap for Envoy. This is an alias for // `generic.current_allocated_bytes`. uint64 allocated = 1; // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for // `generic.heap_size`. uint64 heap_size = 2; // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards // virtual memory usage, and depending on the OS, typically do not count towards physical memory // usage. This is an alias for `tcmalloc.pageheap_unmapped_bytes`. uint64 pageheap_unmapped = 3; // The number of bytes in free, mapped pages in the page heap. These bytes always count towards // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also // count towards physical memory usage. This is an alias for `tcmalloc.pageheap_free_bytes`. uint64 pageheap_free = 4; // The amount of memory used by the TCMalloc thread caches (for small objects). 
This is an alias // for `tcmalloc.current_total_thread_cache_bytes`. uint64 total_thread_cache = 5; // The number of bytes of the physical memory usage by the allocator. This is an alias for // `generic.total_physical_bytes`. uint64 total_physical_bytes = 6; } ================================================ FILE: api/envoy/admin/v4alpha/metrics.proto ================================================ syntax = "proto3"; package envoy.admin.v4alpha; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v4alpha"; option java_outer_classname = "MetricsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Metrics] // Proto representation of an Envoy Counter or Gauge value. message SimpleMetric { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.SimpleMetric"; enum Type { COUNTER = 0; GAUGE = 1; } // Type of the metric represented. Type type = 1; // Current metric value. uint64 value = 2; // Name of the metric. string name = 3; } ================================================ FILE: api/envoy/admin/v4alpha/mutex_stats.proto ================================================ syntax = "proto3"; package envoy.admin.v4alpha; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.admin.v4alpha"; option java_outer_classname = "MutexStatsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: MutexStats] // Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run // under :option:`--enable-mutex-tracing`. For more information, see the `absl::Mutex` // [docs](https://abseil.io/about/design/mutex#extra-features). 
//
// *NB*: The wait cycles below are measured by `absl::base_internal::CycleClock`, and may not
// correspond to core clock frequency. For more information, see the `CycleClock`
// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).
message MutexStats {
  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.MutexStats";

  // The number of individual mutex contentions which have occurred since startup.
  uint64 num_contentions = 1;

  // The length of the current contention wait cycle.
  uint64 current_wait_cycles = 2;

  // The lifetime total of all contention wait cycles.
  uint64 lifetime_wait_cycles = 3;
}

================================================
FILE: api/envoy/admin/v4alpha/server_info.proto
================================================
syntax = "proto3";

package envoy.admin.v4alpha;

import "envoy/config/core/v4alpha/base.proto";

import "google/protobuf/duration.proto";

import "envoy/annotations/deprecation.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.admin.v4alpha";
option java_outer_classname = "ServerInfoProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Server State]

// Proto representation of the value returned by /server_info, containing
// server version/server status information.
// [#next-free-field: 8]
message ServerInfo {
  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.ServerInfo";

  enum State {
    // Server is live and serving traffic.
    LIVE = 0;

    // Server is draining listeners in response to external health checks failing.
    DRAINING = 1;

    // Server has not yet completed cluster manager initialization.
    PRE_INITIALIZING = 2;

    // Server is running the cluster manager initialization callbacks (e.g., RDS).
    INITIALIZING = 3;
  }

  // Server version.
string version = 1; // State of the server. State state = 2; // Uptime since current epoch was started. google.protobuf.Duration uptime_current_epoch = 3; // Uptime since the start of the first epoch. google.protobuf.Duration uptime_all_epochs = 4; // Hot restart version. string hot_restart_version = 5; // Command line options the server is currently running with. CommandLineOptions command_line_options = 6; // Populated node identity of this server. config.core.v4alpha.Node node = 7; } // [#next-free-field: 37] message CommandLineOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.CommandLineOptions"; enum IpVersion { v4 = 0; v6 = 1; } enum Mode { // Validate configs and then serve traffic normally. Serve = 0; // Validate configs and exit. Validate = 1; // Completely load and initialize the config, and then exit without running the listener loop. InitOnly = 2; } enum DrainStrategy { // Gradually discourage connections over the course of the drain period. Gradual = 0; // Discourage all connections for the duration of the drain sequence. Immediate = 1; } reserved 12, 20, 21; reserved "max_stats", "max_obj_name_len"; // See :option:`--base-id` for details. uint64 base_id = 1; // See :option:`--use-dynamic-base-id` for details. bool use_dynamic_base_id = 31; // See :option:`--base-id-path` for details. string base_id_path = 32; // See :option:`--concurrency` for details. uint32 concurrency = 2; // See :option:`--config-path` for details. string config_path = 3; // See :option:`--config-yaml` for details. string config_yaml = 4; // See :option:`--allow-unknown-static-fields` for details. bool allow_unknown_static_fields = 5; // See :option:`--reject-unknown-dynamic-fields` for details. bool reject_unknown_dynamic_fields = 26; // See :option:`--ignore-unknown-dynamic-fields` for details. bool ignore_unknown_dynamic_fields = 30; // See :option:`--admin-address-path` for details. 
string admin_address_path = 6; // See :option:`--local-address-ip-version` for details. IpVersion local_address_ip_version = 7; // See :option:`--log-level` for details. string log_level = 8; // See :option:`--component-log-level` for details. string component_log_level = 9; // See :option:`--log-format` for details. string log_format = 10; // See :option:`--log-format-escaped` for details. bool log_format_escaped = 27; // See :option:`--log-path` for details. string log_path = 11; // See :option:`--service-cluster` for details. string service_cluster = 13; // See :option:`--service-node` for details. string service_node = 14; // See :option:`--service-zone` for details. string service_zone = 15; // See :option:`--file-flush-interval-msec` for details. google.protobuf.Duration file_flush_interval = 16; // See :option:`--drain-time-s` for details. google.protobuf.Duration drain_time = 17; // See :option:`--drain-strategy` for details. DrainStrategy drain_strategy = 33; // See :option:`--parent-shutdown-time-s` for details. google.protobuf.Duration parent_shutdown_time = 18; // See :option:`--mode` for details. Mode mode = 19; // See :option:`--disable-hot-restart` for details. bool disable_hot_restart = 22; // See :option:`--enable-mutex-tracing` for details. bool enable_mutex_tracing = 23; // See :option:`--restart-epoch` for details. uint32 restart_epoch = 24; // See :option:`--cpuset-threads` for details. bool cpuset_threads = 25; // See :option:`--disable-extensions` for details. repeated string disabled_extensions = 28; // See :option:`--bootstrap-version` for details. uint32 bootstrap_version = 29; // See :option:`--enable-fine-grain-logging` for details. bool enable_fine_grain_logging = 34; // See :option:`--socket-path` for details. string socket_path = 35; // See :option:`--socket-mode` for details. 
uint32 socket_mode = 36;
}

================================================
FILE: api/envoy/admin/v4alpha/tap.proto
================================================
syntax = "proto3";

package envoy.admin.v4alpha;

import "envoy/config/tap/v4alpha/common.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.admin.v4alpha";
option java_outer_classname = "TapProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Tap]

// The /tap admin request body that is used to configure an active tap session.
message TapRequest {
  option (udpa.annotations.versioning).previous_message_type = "envoy.admin.v3.TapRequest";

  // The opaque configuration ID used to match the configuration to a loaded extension.
  // A tap extension configures a similar opaque ID that is used to match.
  string config_id = 1 [(validate.rules).string = {min_len: 1}];

  // The tap configuration to load.
  config.tap.v4alpha.TapConfig tap_config = 2 [(validate.rules).message = {required: true}];
}

================================================
FILE: api/envoy/annotations/BUILD
================================================
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package()

================================================
FILE: api/envoy/annotations/deprecation.proto
================================================
syntax = "proto3";

package envoy.annotations;

import "google/protobuf/descriptor.proto";

// [#protodoc-title: Deprecation]

// Allows tagging proto fields as fatal by default. One Envoy release after
// deprecation, deprecated fields will be disallowed by default, a state which
// is reversible with :ref:`runtime overrides <config_runtime_deprecation>`.
// Magic number in this file derived from top 28bit of SHA256 digest of
// "envoy.annotation.disallowed_by_default"
extend google.protobuf.FieldOptions {
  bool disallowed_by_default = 189503207;
}

// Magic number in this file derived from top 28bit of SHA256 digest of
// "envoy.annotation.disallowed_by_default_enum"
extend google.protobuf.EnumValueOptions {
  bool disallowed_by_default_enum = 70100853;
}

================================================
FILE: api/envoy/annotations/resource.proto
================================================
syntax = "proto3";

package envoy.annotations;

import "google/protobuf/descriptor.proto";

// [#protodoc-title: Resource]

// Magic number in this file derived from top 28bit of SHA256 digest of "envoy.annotation.resource".
extend google.protobuf.ServiceOptions {
  ResourceAnnotation resource = 265073217;
}

message ResourceAnnotation {
  // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource
  // type.
  string type = 1;
}

================================================
FILE: api/envoy/api/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    has_services = True,
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/api/v2/auth:pkg",
        "//envoy/api/v2/cluster:pkg",
        "//envoy/api/v2/core:pkg",
        "//envoy/api/v2/endpoint:pkg",
        "//envoy/api/v2/listener:pkg",
        "//envoy/api/v2/route:pkg",
        "//envoy/config/filter/accesslog/v2:pkg",
        "//envoy/config/listener/v2:pkg",
        "//envoy/type:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/api/v2/README.md
================================================
Protocol buffer definitions for xDS and top-level resource API messages.

Package group `//envoy/api/v2:friends` enumerates all consumers of the shared
API messages.
That includes package envoy.api.v2 itself, which contains several xDS definitions. Default visibility for all shared definitions should be set to `//envoy/api/v2:friends`. Additionally, packages envoy.api.v2.core and envoy.api.v2.auth are also consumed throughout the subpackages of `//envoy/api/v2`. ================================================ FILE: api/envoy/api/v2/auth/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/api/v2/auth/cert.proto ================================================ syntax = "proto3"; package envoy.api.v2.auth; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import public "envoy/api/v2/auth/common.proto"; import public "envoy/api/v2/auth/secret.proto"; import public "envoy/api/v2/auth/tls.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; option java_outer_classname = "CertProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; ================================================ FILE: api/envoy/api/v2/auth/common.proto ================================================ syntax = "proto3"; package envoy.api.v2.auth; import "envoy/api/v2/core/base.proto"; import "envoy/type/matcher/string.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; option 
java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common TLS configuration] message TlsParameters { enum TlsProtocol { // Envoy will choose the optimal TLS version. TLS_AUTO = 0; // TLS 1.0 TLSv1_0 = 1; // TLS 1.1 TLSv1_1 = 2; // TLS 1.2 TLSv1_2 = 3; // TLS 1.3 TLSv1_3 = 4; } // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for // servers. TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list // `_ // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not // specified, the default list will be used. // // In non-FIPS builds, the default cipher list is: // // .. code-block:: none // // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] // ECDHE-ECDSA-AES128-SHA // ECDHE-RSA-AES128-SHA // AES128-GCM-SHA256 // AES128-SHA // ECDHE-ECDSA-AES256-GCM-SHA384 // ECDHE-RSA-AES256-GCM-SHA384 // ECDHE-ECDSA-AES256-SHA // ECDHE-RSA-AES256-SHA // AES256-GCM-SHA384 // AES256-SHA // // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: // // .. 
code-block:: none // // ECDHE-ECDSA-AES128-GCM-SHA256 // ECDHE-RSA-AES128-GCM-SHA256 // ECDHE-ECDSA-AES128-SHA // ECDHE-RSA-AES128-SHA // AES128-GCM-SHA256 // AES128-SHA // ECDHE-ECDSA-AES256-GCM-SHA384 // ECDHE-RSA-AES256-GCM-SHA384 // ECDHE-ECDSA-AES256-SHA // ECDHE-RSA-AES256-SHA // AES256-GCM-SHA384 // AES256-SHA repeated string cipher_suites = 3; // If specified, the TLS connection will only support the specified ECDH // curves. If not specified, the default curves will be used. // // In non-FIPS builds, the default curves are: // // .. code-block:: none // // X25519 // P-256 // // In builds using :ref:`BoringSSL FIPS `, the default curve is: // // .. code-block:: none // // P-256 repeated string ecdh_curves = 4; } // BoringSSL private key method configuration. The private key methods are used for external // (potentially asynchronous) signing and decryption operations. Some use cases for private key // methods would be TPM support and TLS acceleration. message PrivateKeyProvider { // Private key method provider name. The name must match a // supported private key method provider type. string provider_name = 1 [(validate.rules).string = {min_bytes: 1}]; // Private key method provider specific configuration. oneof config_type { google.protobuf.Struct config = 2 [deprecated = true, (udpa.annotations.sensitive) = true]; google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; } } // [#next-free-field: 7] message TlsCertificate { // The TLS certificate chain. core.DataSource certificate_chain = 1; // The TLS private key. core.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; // BoringSSL private key method provider. This is an alternative to :ref:`private_key // ` field. This can't be // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key // ` and // :ref:`private_key_provider // ` fields will result in an // error. 
PrivateKeyProvider private_key_provider = 6; // The password to decrypt the TLS private key. If this field is not set, it is assumed that the // TLS private key is not password encrypted. core.DataSource password = 3 [(udpa.annotations.sensitive) = true]; // [#not-implemented-hide:] core.DataSource ocsp_staple = 4; // [#not-implemented-hide:] repeated core.DataSource signed_certificate_timestamp = 5; } message TlsSessionTicketKeys { // Keys for encrypting and decrypting TLS session tickets. The // first key in the array contains the key to encrypt all new sessions created by this context. // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys // by, for example, putting the new key first, and the previous key second. // // If :ref:`session_ticket_keys ` // is not specified, the TLS library will still support resuming sessions via tickets, but it will // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts // or on different hosts. // // Each key must contain exactly 80 bytes of cryptographically-secure random data. For // example, the output of ``openssl rand 80``. // // .. attention:: // // Using this feature has serious security considerations and risks. Improper handling of keys // may result in loss of secrecy in connections, even if ciphers supporting perfect forward // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some // discussion. To minimize the risk, you must: // // * Keep the session ticket keys at least as secure as your TLS certificate private keys // * Rotate session ticket keys at least daily, and preferably hourly // * Always generate keys using a cryptographically-secure random data source repeated core.DataSource keys = 1 [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; } // [#next-free-field: 11] message CertificateValidationContext { // Peer certificate verification mode. 
enum TrustChainVerification { // Perform default certificate verification (e.g., against CA / verification lists) VERIFY_TRUST_CHAIN = 0; // Connections where the certificate fails verification will be permitted. // For HTTP connections, the result of certificate verification can be used in route matching. ( // see :ref:`validated ` ). ACCEPT_UNTRUSTED = 1; } // TLS certificate data containing certificate authority certificates to use in verifying // a presented peer certificate (e.g. server certificate for clusters or client certificate // for listeners). If not specified and a peer certificate is presented it will not be // verified. By default, a client certificate is optional, unless one of the additional // options (:ref:`require_client_certificate // `, // :ref:`verify_certificate_spki // `, // :ref:`verify_certificate_hash // `, or // :ref:`match_subject_alt_names // `) is also // specified. // // It can optionally contain certificate revocation lists, in which case Envoy will verify // that the presented peer certificate has not been revoked by one of the included CRLs. // // See :ref:`the TLS overview ` for a list of common // system CA locations. core.DataSource trusted_ca = 1; // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate // matches one of the specified values. // // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate // can be generated with the following command: // // .. code-block:: bash // // $ openssl x509 -in path/to/client.crt -noout -pubkey // | openssl pkey -pubin -outform DER // | openssl dgst -sha256 -binary // | openssl enc -base64 // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= // // This is the format used in HTTP Public Key Pinning. 
// // When both: // :ref:`verify_certificate_hash // ` and // :ref:`verify_certificate_spki // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. // // .. attention:: // // This option is preferred over :ref:`verify_certificate_hash // `, // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. repeated string verify_certificate_spki = 3 [(validate.rules).repeated = {items {string {min_bytes: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. // // A hex-encoded SHA-256 of the certificate can be generated with the following command: // // .. code-block:: bash // // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a // // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate // can be generated with the following command: // // .. code-block:: bash // // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A // // Both of those formats are acceptable. // // When both: // :ref:`verify_certificate_hash // ` and // :ref:`verify_certificate_spki // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. repeated string verify_certificate_hash = 2 [(validate.rules).repeated = {items {string {min_bytes: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative Names. If specified, Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified values. // // .. 
attention:: // // Subject Alternative Names are easily spoofable and verifying only them is insecure, // therefore this option must be used together with :ref:`trusted_ca // `. repeated string verify_subject_alt_name = 4 [deprecated = true]; // An optional list of Subject Alternative name matchers. Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified matches. // // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be // configured with exact match type in the :ref:`string matcher `. // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", // it should be configured as shown below. // // .. code-block:: yaml // // match_subject_alt_names: // exact: "api.example.com" // // .. attention:: // // Subject Alternative Names are easily spoofable and verifying only them is insecure, // therefore this option must be used together with :ref:`trusted_ca // `. repeated type.matcher.StringMatcher match_subject_alt_names = 9; // [#not-implemented-hide:] Must present a signed time-stamped OCSP response. google.protobuf.BoolValue require_ocsp_staple = 5; // [#not-implemented-hide:] Must present signed certificate time-stamp. google.protobuf.BoolValue require_signed_certificate_timestamp = 6; // An optional `certificate revocation list // `_ // (in PEM format). If specified, Envoy will verify that the presented peer // certificate has not been revoked by this CRL. If this DataSource contains // multiple CRLs, all of them will be used. core.DataSource crl = 7; // If specified, Envoy will not reject expired certificates. bool allow_expired_certificate = 8; // Certificate trust chain verification mode. 
TrustChainVerification trust_chain_verification = 10 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/api/v2/auth/secret.proto ================================================ syntax = "proto3"; package envoy.api.v2.auth; import "envoy/api/v2/auth/common.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; option java_outer_classname = "SecretProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Secrets configuration] message GenericSecret { // Secret of generic type and is available to filters. core.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; } message SdsSecretConfig { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. string name = 1; core.ConfigSource sds_config = 2; } // [#next-free-field: 6] message Secret { // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. 
string name = 1; oneof type { TlsCertificate tls_certificate = 2; TlsSessionTicketKeys session_ticket_keys = 3; CertificateValidationContext validation_context = 4; GenericSecret generic_secret = 5; } } ================================================ FILE: api/envoy/api/v2/auth/tls.proto ================================================ syntax = "proto3"; package envoy.api.v2.auth; import "envoy/api/v2/auth/common.proto"; import "envoy/api/v2/auth/secret.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.auth"; option java_outer_classname = "TlsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tls.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: TLS transport socket] // [#extension: envoy.transport_sockets.tls] // The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. message UpstreamTlsContext { // Common TLS context settings. // // .. attention:: // // Server certificate verification is not enabled by default. Configure // :ref:`trusted_ca` to enable // verification. CommonTlsContext common_tls_context = 1; // SNI string to use when creating TLS backend connections. string sni = 2 [(validate.rules).string = {max_bytes: 255}]; // If true, server-initiated TLS renegotiation will be allowed. // // .. attention:: // // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. bool allow_renegotiation = 3; // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets // for TLSv1.2 and older) to store for the purpose of session resumption. // // Defaults to 1, setting this to 0 disables session resumption. 
google.protobuf.UInt32Value max_session_keys = 4; } // [#next-free-field: 8] message DownstreamTlsContext { // Common TLS context settings. CommonTlsContext common_tls_context = 1; // If specified, Envoy will reject connections without a valid client // certificate. google.protobuf.BoolValue require_client_certificate = 2; // If specified, Envoy will reject connections without a valid and matching SNI. // [#not-implemented-hide:] google.protobuf.BoolValue require_sni = 3; oneof session_ticket_keys_type { // TLS session ticket key settings. TlsSessionTicketKeys session_ticket_keys = 4; // Config for fetching TLS session ticket keys via SDS API. SdsSecretConfig session_ticket_keys_sds_secret_config = 5; // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using // the keys specified through either :ref:`session_ticket_keys ` // or :ref:`session_ticket_keys_sds_secret_config `. // If this config is set to false and no keys are explicitly configured, the TLS server will issue // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the // implication that sessions cannot be resumed across hot restarts or on different hosts. bool disable_stateless_session_resumption = 7; } // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) // ` // only seconds could be specified (fractional seconds are going to be ignored). google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { lt {seconds: 4294967296} gte {} }]; } // TLS context shared by both client and server TLS contexts. 
// [#next-free-field: 9] message CommonTlsContext { message CombinedCertificateValidationContext { // How to validate peer certificates. CertificateValidationContext default_validation_context = 1 [(validate.rules).message = {required: true}]; // Config for fetching validation context via SDS API. SdsSecretConfig validation_context_sds_secret_config = 2 [(validate.rules).message = {required: true}]; } reserved 5; // TLS protocol versions, cipher suites etc. TlsParameters tls_params = 1; // :ref:`Multiple TLS certificates ` can be associated with the // same context to allow both RSA and ECDSA certificates. // // Only a single TLS certificate is supported in client contexts. In server contexts, the first // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is // used for clients that support ECDSA. repeated TlsCertificate tls_certificates = 2; // Configs for fetching TLS certificates via SDS API. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 1}]; oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; // Config for fetching validation context via SDS API. SdsSecretConfig validation_context_sds_secret_config = 7; // Combined certificate validation context holds a default CertificateValidationContext // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic // and default CertificateValidationContext are merged into a new CertificateValidationContext // for validation. This merge is done by Message::MergeFrom(), so dynamic // CertificateValidationContext overwrites singular fields in default // CertificateValidationContext, and concatenates repeated fields to default // CertificateValidationContext, and logical OR is applied to boolean fields. 
CombinedCertificateValidationContext combined_validation_context = 8; } // Supplies the list of ALPN protocols that the listener should expose. In // practice this is likely to be set to one of two values (see the // :ref:`codec_type // ` // parameter in the HTTP connection manager for more information): // // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. // * "http/1.1" If the listener is only going to support HTTP/1.1. // // There is no default for this parameter. If empty, Envoy will not expose ALPN. repeated string alpn_protocols = 4; } ================================================ FILE: api/envoy/api/v2/cds.proto ================================================ syntax = "proto3"; package envoy.api.v2; import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import public "envoy/api/v2/cluster.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "CdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.cluster.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: CDS] // Return list of all clusters this proxy will load balance to. service ClusterDiscoveryService { option (envoy.annotations.resource).type = "envoy.api.v2.Cluster"; rpc StreamClusters(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } rpc DeltaClusters(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } rpc FetchClusters(DiscoveryRequest) returns (DiscoveryResponse) { option (google.api.http).post = "/v2/discovery:clusters"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. 
Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. message CdsDummy { } ================================================ FILE: api/envoy/api/v2/cluster/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/api/v2/cluster/circuit_breaker.proto ================================================ syntax = "proto3"; package envoy.api.v2.cluster; import "envoy/api/v2/core/base.proto"; import "envoy/type/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.cluster"; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; option csharp_namespace = "Envoy.Api.V2.ClusterNS"; option ruby_package = "Envoy.Api.V2.ClusterNS"; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Circuit breakers] // :ref:`Circuit breaking` settings can be // specified individually for each defined priority. message CircuitBreakers { // A Thresholds defines CircuitBreaker settings for a // :ref:`RoutingPriority`. // [#next-free-field: 9] message Thresholds { message RetryBudget { // Specifies the limit on concurrent retries as a percentage of the sum of active requests and // active pending requests. For example, if there are 100 active requests and the // budget_percent is set to 25, there may be 25 active retries. 
//
      // This parameter is optional. Defaults to 20%.
      type.Percent budget_percent = 1;

      // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the
      // number of active retries may never go below this number.
      //
      // This parameter is optional. Defaults to 3.
      google.protobuf.UInt32Value min_retry_concurrency = 2;
    }

    // The :ref:`RoutingPriority`
    // the specified CircuitBreaker settings apply to.
    core.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];

    // The maximum number of connections that Envoy will make to the upstream
    // cluster. If not specified, the default is 1024.
    google.protobuf.UInt32Value max_connections = 2;

    // The maximum number of pending requests that Envoy will allow to the
    // upstream cluster. If not specified, the default is 1024.
    google.protobuf.UInt32Value max_pending_requests = 3;

    // The maximum number of parallel requests that Envoy will make to the
    // upstream cluster. If not specified, the default is 1024.
    google.protobuf.UInt32Value max_requests = 4;

    // The maximum number of parallel retries that Envoy will allow to the
    // upstream cluster. If not specified, the default is 3.
    google.protobuf.UInt32Value max_retries = 5;

    // Specifies a limit on concurrent retries in relation to the number of active requests. This
    // parameter is optional.
    //
    // .. note::
    //
    //   If this field is set, the retry budget will override any configured retry circuit
    //   breaker.
    RetryBudget retry_budget = 8;

    // If track_remaining is true, then stats will be published that expose
    // the number of resources remaining until the circuit breakers open. If
    // not specified, the default is false.
    //
    // .. note::
    //
    //   If a retry budget is used in lieu of the max_retries circuit breaker,
    //   the remaining retry resources remaining will not be tracked.
    bool track_remaining = 6;

    // The maximum number of connection pools per cluster that Envoy will concurrently support at
    // once. If not specified, the default is unlimited. Set this for clusters which create a
    // large number of connection pools. See
    // :ref:`Circuit Breaking ` for
    // more details.
    google.protobuf.UInt32Value max_connection_pools = 7;
  }

  // If multiple :ref:`Thresholds`
  // are defined with the same :ref:`RoutingPriority`,
  // the first one in the list is used. If no Thresholds is defined for a given
  // :ref:`RoutingPriority`, the default values
  // are used.
  repeated Thresholds thresholds = 1;
}

================================================
FILE: api/envoy/api/v2/cluster/filter.proto
================================================

syntax = "proto3";

package envoy.api.v2.cluster;

import "google/protobuf/any.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.api.v2.cluster";
option java_outer_classname = "FilterProto";
option java_multiple_files = true;
option csharp_namespace = "Envoy.Api.V2.ClusterNS";
option ruby_package = "Envoy.Api.V2.ClusterNS";
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Upstream filters]

// Upstream filters apply to the connections to the upstream cluster hosts.
message Filter {
  // The name of the filter to instantiate. The name must match a
  // :ref:`supported filter `.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // Filter specific configuration which depends on the filter being
  // instantiated. See the supported filters for further documentation.
google.protobuf.Any typed_config = 2;
}

================================================
FILE: api/envoy/api/v2/cluster/outlier_detection.proto
================================================

syntax = "proto3";

package envoy.api.v2.cluster;

import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.api.v2.cluster";
option java_outer_classname = "OutlierDetectionProto";
option java_multiple_files = true;
option csharp_namespace = "Envoy.Api.V2.ClusterNS";
option ruby_package = "Envoy.Api.V2.ClusterNS";
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Outlier detection]

// See the :ref:`architecture overview ` for
// more information on outlier detection.
// [#next-free-field: 21]
message OutlierDetection {
  // The number of consecutive 5xx responses or local origin errors that are mapped
  // to 5xx error codes before a consecutive 5xx ejection
  // occurs. Defaults to 5.
  google.protobuf.UInt32Value consecutive_5xx = 1;

  // The time interval between ejection analysis sweeps. This can result in
  // both new ejections as well as hosts being returned to service. Defaults
  // to 10000ms or 10s.
  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];

  // The base time that a host is ejected for. The real time is equal to the
  // base time multiplied by the number of times the host has been ejected.
  // Defaults to 30000ms or 30s.
  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];

  // The maximum % of an upstream cluster that can be ejected due to outlier
  // detection. Defaults to 10% but will eject at least one host regardless of the value.
  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive 5xx. This setting can be used to disable
  // ejection or to ramp it up slowly. Defaults to 100.
  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through success rate statistics. This setting can be used to
  // disable ejection or to ramp it up slowly. Defaults to 100.
  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];

  // The number of hosts in a cluster that must have enough request volume to
  // detect success rate outliers. If the number of hosts is less than this
  // setting, outlier detection via success rate statistics is not performed
  // for any host in the cluster. Defaults to 5.
  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;

  // The minimum number of total requests that must be collected in one
  // interval (as defined by the interval duration above) to include this host
  // in success rate based outlier detection. If the volume is lower than this
  // setting, outlier detection via success rate statistics is not performed
  // for that host. Defaults to 100.
  google.protobuf.UInt32Value success_rate_request_volume = 8;

  // This factor is used to determine the ejection threshold for success rate
  // outlier ejection. The ejection threshold is the difference between the
  // mean success rate, and the product of this factor and the standard
  // deviation of the mean success rate: mean - (stdev *
  // success_rate_stdev_factor). This factor is divided by a thousand to get a
  // double. That is, if the desired factor is 1.9, the runtime value should
  // be 1900. Defaults to 1900.
google.protobuf.UInt32Value success_rate_stdev_factor = 9;

  // The number of consecutive gateway failures (502, 503, 504 status codes)
  // before a consecutive gateway failure ejection occurs. Defaults to 5.
  google.protobuf.UInt32Value consecutive_gateway_failure = 10;

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive gateway failures. This setting can be
  // used to disable ejection or to ramp it up slowly. Defaults to 0.
  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11
      [(validate.rules).uint32 = {lte: 100}];

  // Determines whether to distinguish local origin failures from external errors. If set to true
  // the following configuration parameters are taken into account:
  // :ref:`consecutive_local_origin_failure`,
  // :ref:`enforcing_consecutive_local_origin_failure`
  // and
  // :ref:`enforcing_local_origin_success_rate`.
  // Defaults to false.
  bool split_external_local_origin_errors = 12;

  // The number of consecutive locally originated failures before ejection
  // occurs. Defaults to 5. Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive locally originated failures. This setting can be
  // used to disable ejection or to ramp it up slowly. Defaults to 100.
  // Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14
      [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through success rate statistics for locally originated errors.
  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.
  // Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15
      [(validate.rules).uint32 = {lte: 100}];

  // The failure percentage to use when determining failure percentage-based outlier detection. If
  // the failure percentage of a given host is greater than or equal to this value, it will be
  // ejected. Defaults to 85.
  google.protobuf.UInt32Value failure_percentage_threshold = 16
      [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status is detected through
  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up
  // slowly. Defaults to 0.
  //
  // [#next-major-version: setting this without setting failure_percentage_threshold should be
  // invalid in v4.]
  google.protobuf.UInt32Value enforcing_failure_percentage = 17
      [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status is detected through
  // local-origin failure percentage statistics. This setting can be used to disable ejection or to
  // ramp it up slowly. Defaults to 0.
  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18
      [(validate.rules).uint32 = {lte: 100}];

  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.
  // If the total number of hosts in the cluster is less than this value, failure percentage-based
  // ejection will not be performed. Defaults to 5.
  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;

  // The minimum number of total requests that must be collected in one interval (as defined by the
  // interval duration above) to perform failure percentage-based ejection for this host. If the
  // volume is lower than this setting, failure percentage-based ejection will not be performed for
  // this host. Defaults to 50.
google.protobuf.UInt32Value failure_percentage_request_volume = 20;
}

================================================
FILE: api/envoy/api/v2/cluster.proto
================================================

syntax = "proto3";

package envoy.api.v2;

import "envoy/api/v2/auth/tls.proto";
import "envoy/api/v2/cluster/circuit_breaker.proto";
import "envoy/api/v2/cluster/filter.proto";
import "envoy/api/v2/cluster/outlier_detection.proto";
import "envoy/api/v2/core/address.proto";
import "envoy/api/v2/core/base.proto";
import "envoy/api/v2/core/config_source.proto";
import "envoy/api/v2/core/health_check.proto";
import "envoy/api/v2/core/protocol.proto";
import "envoy/api/v2/endpoint.proto";
import "envoy/type/percent.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "envoy/annotations/deprecation.proto";
import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.api.v2";
option java_outer_classname = "ClusterProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.cluster.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Cluster configuration]

// Configuration for a single upstream cluster.
// [#next-free-field: 48]
message Cluster {
  // Refer to :ref:`service discovery type `
  // for an explanation on each type.
  enum DiscoveryType {
    // Refer to the :ref:`static discovery type`
    // for an explanation.
    STATIC = 0;

    // Refer to the :ref:`strict DNS discovery
    // type`
    // for an explanation.
    STRICT_DNS = 1;

    // Refer to the :ref:`logical DNS discovery
    // type`
    // for an explanation.
    LOGICAL_DNS = 2;

    // Refer to the :ref:`service discovery type`
    // for an explanation.
    EDS = 3;

    // Refer to the :ref:`original destination discovery
    // type`
    // for an explanation.
ORIGINAL_DST = 4; } // Refer to :ref:`load balancer type ` architecture // overview section for information on each type. enum LbPolicy { // Refer to the :ref:`round robin load balancing // policy` // for an explanation. ROUND_ROBIN = 0; // Refer to the :ref:`least request load balancing // policy` // for an explanation. LEAST_REQUEST = 1; // Refer to the :ref:`ring hash load balancing // policy` // for an explanation. RING_HASH = 2; // Refer to the :ref:`random load balancing // policy` // for an explanation. RANDOM = 3; // Refer to the :ref:`original destination load balancing // policy` // for an explanation. // // .. attention:: // // **This load balancing policy is deprecated**. Use CLUSTER_PROVIDED instead. // ORIGINAL_DST_LB = 4 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; // Refer to the :ref:`Maglev load balancing policy` // for an explanation. MAGLEV = 5; // This load balancer type must be specified if the configured cluster provides a cluster // specific load balancer. Consult the configured cluster's documentation for whether to set // this option or not. CLUSTER_PROVIDED = 6; // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy // ` field to determine the LB policy. // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field // and instead using the new load_balancing_policy field as the one and only mechanism for // configuring this.] LOAD_BALANCING_POLICY_CONFIG = 7; } // When V4_ONLY is selected, the DNS resolver will only perform a lookup for // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will // only perform a lookup for addresses in the IPv6 family. If AUTO is // specified, the DNS resolver will first perform a lookup for addresses in // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. // For cluster types other than // :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS`, // this setting is // ignored. 
enum DnsLookupFamily { AUTO = 0; V4_ONLY = 1; V6_ONLY = 2; } enum ClusterProtocolSelection { // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). // If :ref:`http2_protocol_options ` are // present, HTTP2 will be used, otherwise HTTP1.1 will be used. USE_CONFIGURED_PROTOCOL = 0; // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. USE_DOWNSTREAM_PROTOCOL = 1; } // TransportSocketMatch specifies what transport socket config will be used // when the match conditions are satisfied. message TransportSocketMatch { // The name of the match, used in stats generation. string name = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria. // The connection to the endpoint with metadata matching what is set in this field // will use the transport socket configuration specified here. // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match // against the values specified in this field. google.protobuf.Struct match = 2; // The configuration of the transport socket. core.TransportSocket transport_socket = 3; } // Extended cluster type. message CustomClusterType { // The type of the cluster to instantiate. The name must match a supported cluster type. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. google.protobuf.Any typed_config = 2; } // Only valid when discovery type is EDS. message EdsClusterConfig { // Configuration for the source of EDS updates for this Cluster. core.ConfigSource eds_config = 1; // Optional alternative to cluster name to present to EDS. This does not // have the same restrictions as cluster name, i.e. it may be arbitrary // length. 
string service_name = 2; } // Optionally divide the endpoints in this cluster into subsets defined by // endpoint metadata and selected by route and weighted cluster metadata. // [#next-free-field: 8] message LbSubsetConfig { // If NO_FALLBACK is selected, a result // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, // any cluster endpoint may be returned (subject to policy, health checks, // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the // endpoints matching the values from the default_subset field. enum LbSubsetFallbackPolicy { NO_FALLBACK = 0; ANY_ENDPOINT = 1; DEFAULT_SUBSET = 2; } // Specifications for subsets. message LbSubsetSelector { // Allows to override top level fallback policy per selector. enum LbSubsetSelectorFallbackPolicy { // If NOT_DEFINED top level config fallback policy is used instead. NOT_DEFINED = 0; // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported. NO_FALLBACK = 1; // If ANY_ENDPOINT is selected, any cluster endpoint may be returned // (subject to policy, health checks, etc). ANY_ENDPOINT = 2; // If DEFAULT_SUBSET is selected, load balancing is performed over the // endpoints matching the values from the default_subset field. DEFAULT_SUBSET = 3; // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata // keys reduced to // :ref:`fallback_keys_subset`. // It allows for a fallback to a different, less specific selector if some of the keys of // the selector are considered optional. KEYS_SUBSET = 4; } // List of keys to match with the weighted cluster metadata. repeated string keys = 1; // The behavior used when no endpoint subset matches the selected route's // metadata. LbSubsetSelectorFallbackPolicy fallback_policy = 2 [(validate.rules).enum = {defined_only: true}]; // Subset of // :ref:`keys` used by // :ref:`KEYS_SUBSET` // fallback policy. // It has to be a non empty list if KEYS_SUBSET fallback policy is selected. 
// For any other fallback policy the parameter is not used and should not be set. // Only values also present in // :ref:`keys` are allowed, but // `fallback_keys_subset` cannot be equal to `keys`. repeated string fallback_keys_subset = 3; } // The behavior used when no endpoint subset matches the selected route's // metadata. The value defaults to // :ref:`NO_FALLBACK`. LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}]; // Specifies the default subset of endpoints used during fallback if // fallback_policy is // :ref:`DEFAULT_SUBSET`. // Each field in default_subset is // compared to the matching LbEndpoint.Metadata under the *envoy.lb* // namespace. It is valid for no hosts to match, in which case the behavior // is the same as a fallback_policy of // :ref:`NO_FALLBACK`. google.protobuf.Struct default_subset = 2; // For each entry, LbEndpoint.Metadata's // *envoy.lb* namespace is traversed and a subset is created for each unique // combination of key and value. For example: // // .. code-block:: json // // { "subset_selectors": [ // { "keys": [ "version" ] }, // { "keys": [ "stage", "hardware_type" ] } // ]} // // A subset is matched when the metadata from the selected route and // weighted cluster contains the same keys and values as the subset's // metadata. The same host may appear in multiple subsets. repeated LbSubsetSelector subset_selectors = 3; // If true, routing to subsets will take into account the localities and locality weights of the // endpoints when making the routing decision. // // There are some potential pitfalls associated with enabling this feature, as the resulting // traffic split after applying both a subset match and locality weights might be undesirable. // // Consider for example a situation in which you have 50/50 split across two localities X/Y // which have 100 hosts each without subsetting. 
If the subset LB results in X having only 1 // host selected but Y having 100, then a lot more load is being dumped on the single host in X // than originally anticipated in the load balancing assignment delivered via EDS. bool locality_weight_aware = 4; // When used with locality_weight_aware, scales the weight of each locality by the ratio // of hosts in the subset vs hosts in the original subset. This aims to even out the load // going to an individual locality if said locality is disproportionately affected by the // subset predicate. bool scale_locality_weight = 5; // If true, when a fallback policy is configured and its corresponding subset fails to find // a host this will cause any host to be selected instead. // // This is useful when using the default subset as the fallback policy, given the default // subset might become empty. With this option enabled, if that happens the LB will attempt // to select a host from the entire cluster. bool panic_mode_any = 6; // If true, metadata specified for a metadata key will be matched against the corresponding // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value // and any of the elements in the list matches the criteria. bool list_as_any = 7; } // Specific configuration for the LeastRequest load balancing policy. message LeastRequestLbConfig { // The number of random healthy hosts from which the host with the fewest active requests will // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set. google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}]; } // Specific configuration for the :ref:`RingHash` // load balancing policy. message RingHashLbConfig { // The hash function used to hash hosts onto the ketama ring. enum HashFunction { // Use `xxHash `_, this is the default hash function. XX_HASH = 0; // Use `MurmurHash2 `_, this is compatible with // std:hash in GNU libstdc++ 3.4.20 or above. 
This is typically the case when compiled // on Linux and not macOS. MURMUR_HASH_2 = 1; } reserved 2; // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each // provided host) the better the request distribution will reflect the desired weights. Defaults // to 1024 entries, and limited to 8M entries. See also // :ref:`maximum_ring_size`. google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}]; // The hash function used to hash hosts onto the ketama ring. The value defaults to // :ref:`XX_HASH`. HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}]; // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered // to further constrain resource use. See also // :ref:`minimum_ring_size`. google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}]; } // Specific configuration for the // :ref:`Original Destination ` // load balancing policy. message OriginalDstLbConfig { // When true, :ref:`x-envoy-original-dst-host // ` can be used to override destination // address. // // .. attention:: // // This header isn't sanitized by default, so enabling this feature allows HTTP clients to // route traffic to arbitrary hosts and/or ports, which may have serious security // consequences. bool use_http_header = 1; } // Common configuration for all load balancer implementations. // [#next-free-field: 8] message CommonLbConfig { // Configuration for :ref:`zone aware routing // `. message ZoneAwareLbConfig { // Configures percentage of requests that will be considered for zone aware routing // if zone aware routing is configured. If not specified, the default is 100%. // * :ref:`runtime values `. // * :ref:`Zone aware routing support `. 
type.Percent routing_enabled = 1; // Configures minimum upstream cluster size required for zone aware routing // If upstream cluster size is less than specified, zone aware routing is not performed // even if zone aware routing is configured. If not specified, the default is 6. // * :ref:`runtime values `. // * :ref:`Zone aware routing support `. google.protobuf.UInt64Value min_cluster_size = 2; // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic // mode`. Instead, the cluster will fail all // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a // failing service. bool fail_traffic_on_panic = 3; } // Configuration for :ref:`locality weighted load balancing // ` message LocalityWeightedLbConfig { } // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) message ConsistentHashingLbConfig { // If set to `true`, the cluster will use hostname instead of the resolved // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address. bool use_hostname_for_hashing = 1; } // Configures the :ref:`healthy panic threshold `. // If not specified, the default is 50%. // To disable panic mode, set to 0%. // // .. note:: // The specified percent will be truncated to the nearest 1%. type.Percent healthy_panic_threshold = 1; oneof locality_config_specifier { ZoneAwareLbConfig zone_aware_lb_config = 2; LocalityWeightedLbConfig locality_weighted_lb_config = 3; } // If set, all health check/weight/metadata updates that happen within this duration will be // merged and delivered in one shot when the duration expires. The start of the duration is when // the first update happens. This is useful for big clusters, with potentially noisy deploys // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes // or metadata updates. 
The first set of updates to be seen apply immediately (e.g.: a new // cluster). Please always keep in mind that the use of sandbox technologies may change this // behavior. // // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge // window to 0. // // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is // because merging those updates isn't currently safe. See // https://github.com/envoyproxy/envoy/pull/3941. google.protobuf.Duration update_merge_window = 4; // If set to true, Envoy will not consider new hosts when computing load balancing weights until // they have been health checked for the first time. This will have no effect unless // active health checking is also configured. // // Ignoring a host means that for any load balancing calculations that adjust weights based // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and // panic mode) Envoy will exclude these hosts in the denominator. // // For example, with hosts in two priorities P0 and P1, where P0 looks like // {healthy, unhealthy (new), unhealthy (new)} // and where P1 looks like // {healthy, healthy} // all traffic will still hit P0, as 1 / (3 - 2) = 1. // // Enabling this will allow scaling up the number of hosts for a given cluster without entering // panic mode or triggering priority spillover, assuming the hosts pass the first health check. // // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not // contribute to the calculation when deciding whether panic mode is enabled or not. bool ignore_new_hosts_until_first_hc = 5; // If set to `true`, the cluster manager will drain all existing // connections to upstream hosts whenever hosts are added or removed from the cluster. bool close_connections_on_host_set_change = 6; // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.) 
ConsistentHashingLbConfig consistent_hashing_lb_config = 7; } message RefreshRate { // Specifies the base interval between refreshes. This parameter is required and must be greater // than zero and less than // :ref:`max_interval `. google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { required: true gt {nanos: 1000000} }]; // Specifies the maximum interval between refreshes. This parameter is optional, but must be // greater than or equal to the // :ref:`base_interval ` if set. The default // is 10 times the :ref:`base_interval `. google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}]; } reserved 12, 15; // Configuration to use different transport sockets for different endpoints. // The entry of *envoy.transport_socket_match* in the // :ref:`LbEndpoint.Metadata ` // is used to match against the transport sockets as they appear in the list. The first // :ref:`match ` is used. // For example, with the following match // // .. code-block:: yaml // // transport_socket_matches: // - name: "enableMTLS" // match: // acceptMTLS: true // transport_socket: // name: envoy.transport_sockets.tls // config: { ... } # tls socket configuration // - name: "defaultToPlaintext" // match: {} // transport_socket: // name: envoy.transport_sockets.raw_buffer // // Connections to the endpoints whose metadata value under *envoy.transport_socket_match* // having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration. // // If a :ref:`socket match ` with empty match // criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext" // socket match in case above. // // If an endpoint metadata's value under *envoy.transport_socket_match* does not match any // *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or // *transport_socket* specified in this cluster. // // This field allows gradual and flexible transport socket configuration changes. 
//
// The metadata of endpoints in EDS can indicate transport socket capabilities. For example,
// an endpoint's metadata can have two key/value pairs, "acceptMTLS": "true" and
// "acceptPlaintext": "true", while some other endpoints that only accept plaintext traffic
// have just the "acceptPlaintext": "true" metadata information.
//
// Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS
// traffic for endpoints with "acceptMTLS": "true", by adding a corresponding
// *TransportSocketMatch* in this field. Other client Envoys receive CDS without
// *transport_socket_match* set, and still send plain text traffic to the same cluster.
//
// [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]
repeated TransportSocketMatch transport_socket_matches = 43;

// Supplies the name of the cluster which must be unique across all clusters.
// The cluster name is used when emitting
// :ref:`statistics ` if :ref:`alt_stat_name
// ` is not provided.
// Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.
string name = 1 [(validate.rules).string = {min_bytes: 1}];

// An optional alternative to the cluster name to be used while emitting stats.
// Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be
// confused with :ref:`Router Filter Header
// `.
string alt_stat_name = 28;

oneof cluster_discovery_type {
  // The :ref:`service discovery type `
  // to use for resolving the cluster.
  DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];

  // The custom cluster type.
  CustomClusterType cluster_type = 38;
}

// Configuration to use for EDS updates for the Cluster.
EdsClusterConfig eds_cluster_config = 3;

// The timeout for new network connections to hosts in the cluster.
google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];

// Soft limit on size of the cluster’s connections read and write buffers.
If // unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; // The :ref:`load balancer type ` to use // when picking a host in the cluster. LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true}]; // If the service discovery type is // :ref:`STATIC`, // :ref:`STRICT_DNS` // or :ref:`LOGICAL_DNS`, // then hosts is required. // // .. attention:: // // **This field is deprecated**. Set the // :ref:`load_assignment` field instead. // repeated core.Address hosts = 7 [deprecated = true]; // Setting this is required for specifying members of // :ref:`STATIC`, // :ref:`STRICT_DNS` // or :ref:`LOGICAL_DNS` clusters. // This field supersedes the *hosts* field in the v2 API. // // .. attention:: // // Setting this allows non-EDS cluster types to contain embedded EDS equivalent // :ref:`endpoint assignments`. // ClusterLoadAssignment load_assignment = 33; // Optional :ref:`active health checking ` // configuration for the cluster. If no // configuration is specified no health checking will be done and all cluster // members will be considered healthy at all times. repeated core.HealthCheck health_checks = 8; // Optional maximum requests for a single upstream connection. This parameter // is respected by both the HTTP/1.1 and HTTP/2 connection pool // implementations. If not specified, there is no limit. Setting this // parameter to 1 will effectively disable keep alive. google.protobuf.UInt32Value max_requests_per_connection = 9; // Optional :ref:`circuit breaking ` for the cluster. cluster.CircuitBreakers circuit_breakers = 10; // The TLS configuration for connections to the upstream cluster. // // .. attention:: // // **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are // set, `transport_socket` takes priority. 
auth.UpstreamTlsContext tls_context = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // HTTP protocol options that are applied only to upstream HTTP connections. // These options apply to all HTTP versions. core.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46; // Additional options when handling HTTP requests upstream. These options will be applicable to // both HTTP1 and HTTP2 requests. core.HttpProtocolOptions common_http_protocol_options = 29; // Additional options when handling HTTP1 requests. core.Http1ProtocolOptions http_protocol_options = 13; // Even if default HTTP2 protocol options are desired, this field must be // set so that Envoy will assume that the upstream supports HTTP/2 when // making new HTTP connection pool connections. Currently, Envoy only // supports prior knowledge for upstream connections. Even if TLS is used // with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2 // connections to happen over plain text. core.Http2ProtocolOptions http2_protocol_options = 14; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. map extension_protocol_options = 35 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // The extension_protocol_options field is used to provide extension-specific protocol options // for upstream connections. The key should match the extension filter name, such as // "envoy.filters.network.thrift_proxy". See the extension's documentation for details on // specific options. map typed_extension_protocol_options = 36; // If the DNS refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, // or :ref:`LOGICAL_DNS`, // this value is used as the cluster’s DNS refresh // rate. 
The value configured must be at least 1ms. If this setting is not specified, the // value defaults to 5000ms. For cluster types other than // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration = {gt {nanos: 1000000}}]; // If the DNS failure refresh rate is specified and the cluster type is either // :ref:`STRICT_DNS`, // or :ref:`LOGICAL_DNS`, // this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is // not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types // other than :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS` this setting is // ignored. RefreshRate dns_failure_refresh_rate = 44; // Optional configuration for setting cluster's DNS refresh rate. If the value is set to true, // cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS // resolution. bool respect_dns_ttl = 39; // The DNS IP address resolution policy. If this setting is not specified, the // value defaults to // :ref:`AUTO`. DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}]; // If DNS resolvers are specified and the cluster type is either // :ref:`STRICT_DNS`, // or :ref:`LOGICAL_DNS`, // this value is used to specify the cluster’s dns resolvers. // If this setting is not specified, the value defaults to the default // resolver, which uses /etc/resolv.conf for configuration. For cluster types // other than // :ref:`STRICT_DNS` // and :ref:`LOGICAL_DNS` // this setting is ignored. // Setting this value causes failure if the // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during // server startup. Apple's API only allows overriding DNS resolvers via system settings. repeated core.Address dns_resolvers = 18; // [#next-major-version: Reconcile DNS options in a single message.] 
// Always use TCP queries instead of UDP queries for DNS lookups.
// Setting this value causes failure if the
// ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
// server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 45;

// If specified, outlier detection will be enabled for this upstream cluster.
// Each of the configuration values can be overridden via
// :ref:`runtime values `.
cluster.OutlierDetection outlier_detection = 19;

// The interval for removing stale hosts from a cluster type
// :ref:`ORIGINAL_DST`.
// Hosts are considered stale if they have not been used
// as upstream destinations during this interval. New hosts are added
// to original destination clusters on demand as new connections are
// redirected to Envoy, causing the number of hosts in the cluster to
// grow over time. Hosts that are not stale (they are actively used as
// destinations) are kept in the cluster, which allows connections to
// them to remain open, saving the latency that would otherwise be spent
// on opening new connections. If this setting is not specified, the
// value defaults to 5000ms. For cluster types other than
// :ref:`ORIGINAL_DST`
// this setting is ignored.
google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];

// Optional configuration used to bind newly established upstream connections.
// This overrides any bind_config specified in the bootstrap proto.
// If the address and port are empty, no bind will be performed.
core.BindConfig upstream_bind_config = 21;

// Configuration for load balancing subsetting.
LbSubsetConfig lb_subset_config = 22;

// Optional configuration for the load balancing algorithm selected by
// LbPolicy. Currently only
// :ref:`RING_HASH` and
// :ref:`LEAST_REQUEST`
// have additional configuration options.
// Specifying ring_hash_lb_config or least_request_lb_config without setting the corresponding // LbPolicy will generate an error at runtime. oneof lb_config { // Optional configuration for the Ring Hash load balancing policy. RingHashLbConfig ring_hash_lb_config = 23; // Optional configuration for the Original Destination load balancing policy. OriginalDstLbConfig original_dst_lb_config = 34; // Optional configuration for the LeastRequest load balancing policy. LeastRequestLbConfig least_request_lb_config = 37; } // Common configuration for all load balancer implementations. CommonLbConfig common_lb_config = 27; // Optional custom transport socket implementation to use for upstream connections. // To setup TLS, set a transport socket with name `tls` and // :ref:`UpstreamTlsContexts ` in the `typed_config`. // If no transport socket configuration is specified, new connections // will be set up with plaintext. core.TransportSocket transport_socket = 24; // The Metadata field can be used to provide additional information about the // cluster. It can be used for stats, logging, and varying filter behavior. // Fields should use reverse DNS notation to denote which entity within Envoy // will need the information. For instance, if the metadata is intended for // the Router filter, the filter name should be specified as *envoy.filters.http.router*. core.Metadata metadata = 25; // Determines how Envoy selects the protocol used to speak to upstream hosts. ClusterProtocolSelection protocol_selection = 26; // Optional options for upstream connections. UpstreamConnectionOptions upstream_connection_options = 30; // If an upstream host becomes unhealthy (as determined by the configured health checks // or outlier detection), immediately close all connections to the failed host. // // .. note:: // // This is currently only supported for connections created by tcp_proxy. // // .. 
note:: // // The current implementation of this feature closes all connections immediately when // the unhealthy status is detected. If there are a large number of connections open // to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of // time exclusively closing these connections, and not processing any other traffic. bool close_connections_on_host_health_failure = 31; // If set to true, Envoy will ignore the health value of a host when processing its removal // from service discovery. This means that if active health checking is used, Envoy will *not* // wait for the endpoint to go unhealthy before removing it. bool drain_connections_on_host_removal = 32 [(udpa.annotations.field_migrate).rename = "ignore_health_on_host_removal"]; // An (optional) network filter chain, listed in the order the filters should be applied. // The chain will be applied to all outgoing connections that Envoy makes to the upstream // servers of this cluster. repeated cluster.Filter filters = 40; // [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the // :ref:`lb_policy` field has the value // :ref:`LOAD_BALANCING_POLICY_CONFIG`. LoadBalancingPolicy load_balancing_policy = 41; // [#not-implemented-hide:] // If present, tells the client where to send load reports via LRS. If not present, the // client will fall back to a client-side default, which may be either (a) don't send any // load reports or (b) send load reports for all clusters to a single default server // (which may be configured in the bootstrap file). // // Note that if multiple clusters point to the same LRS server, the client may choose to // create a separate stream for each cluster or it may choose to coalesce the data for // multiple clusters onto a single stream. Either way, the client must make sure to send // the data for any given cluster on no more than one stream. 
// // [#next-major-version: In the v3 API, we should consider restructuring this somehow, // maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation // from the LRS stream here.] core.ConfigSource lrs_server = 42; // If track_timeout_budgets is true, the :ref:`timeout budget histograms // ` will be published for each // request. These show what percentage of a request's per try and global timeout was used. A value // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. bool track_timeout_budgets = 47; } // [#not-implemented-hide:] Extensible load balancing policy configuration. // // Every LB policy defined via this mechanism will be identified via a unique name using reverse // DNS notation. If the policy needs configuration parameters, it must define a message for its // own configuration, which will be stored in the config field. The name of the policy will tell // clients which type of message they should expect to see in the config field. // // Note that there are cases where it is useful to be able to independently select LB policies // for choosing a locality and for choosing an endpoint within that locality. For example, a // given deployment may always use the same policy to choose the locality, but for choosing the // endpoint within the locality, some clusters may use weighted-round-robin, while others may // use some sort of session-based balancing. // // This can be accomplished via hierarchical LB policies, where the parent LB policy creates a // child LB policy for each locality. For each request, the parent chooses the locality and then // delegates to the child policy for that locality to choose the endpoint within the locality. // // To facilitate this, the config message for the top-level LB policy may include a field of // type LoadBalancingPolicy that specifies the child policy. 
message LoadBalancingPolicy {
  message Policy {
    // Required. The name of the LB policy.
    string name = 1;

    // Optional config for the LB policy.
    // No more than one of these two fields may be populated.
    google.protobuf.Struct config = 2 [deprecated = true];

    google.protobuf.Any typed_config = 3;
  }

  // Each client will iterate over the list in order and stop at the first policy that it
  // supports. This provides a mechanism for starting to use new LB policies that are not yet
  // supported by all clients.
  repeated Policy policies = 1;
}

// An extensible structure containing the address Envoy should bind to when
// establishing upstream connections.
message UpstreamBindConfig {
  // The address Envoy should bind to when establishing upstream connections.
  core.Address source_address = 1;
}

// Optional connection-level options applied to upstream connections for a cluster.
message UpstreamConnectionOptions {
  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.
  core.TcpKeepalive tcp_keepalive = 1;
}

================================================
FILE: api/envoy/api/v2/core/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/type:pkg", "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/api/v2/core/address.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "envoy/api/v2/core/socket_option.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "AddressProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Network addresses] message Pipe { // Unix Domain Socket path. On Linux, paths starting with '@' will use the // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. string path = 1 [(validate.rules).string = {min_bytes: 1}]; // The mode for the Pipe. Not applicable for abstract sockets. uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; } // [#next-free-field: 7] message SocketAddress { enum Protocol { TCP = 0; UDP = 1; } Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; // The address for this socket. :ref:`Listeners ` will bind // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: // It is possible to distinguish a Listener address via the prefix/suffix matching // in :ref:`FilterChainMatch `.] 
When used // within an upstream :ref:`BindConfig `, the address // controls the source address of outbound connections. For :ref:`clusters // `, the cluster type determines whether the // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. string address = 2 [(validate.rules).string = {min_bytes: 1}]; oneof port_specifier { option (validate.required) = true; uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; // This is only valid if :ref:`resolver_name // ` is specified below and the // named resolver is capable of named port resolution. string named_port = 4; } // The name of the custom resolver. This must have been registered with Envoy. If // this is empty, a context dependent default applies. If the address is a concrete // IP address, no resolution will occur. If address is a hostname this // should be set for resolution other than DNS. Specifying a custom resolver with // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. string resolver_name = 5; // When binding to an IPv6 address above, this enables `IPv4 compatibility // `_. Binding to ``::`` will // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into // IPv6 space as ``::FFFF:``. bool ipv4_compat = 6; } message TcpKeepalive { // Maximum number of keepalive probes to send without response before deciding // the connection is dead. Default is to use the OS level configuration (unless // overridden, Linux defaults to 9.) google.protobuf.UInt32Value keepalive_probes = 1; // The number of seconds a connection needs to be idle before keep-alive probes // start being sent. Default is to use the OS level configuration (unless // overridden, Linux defaults to 7200s (i.e., 2 hours.) google.protobuf.UInt32Value keepalive_time = 2; // The number of seconds between keep-alive probes. 
Default is to use the OS // level configuration (unless overridden, Linux defaults to 75s.) google.protobuf.UInt32Value keepalive_interval = 3; } message BindConfig { // The address to bind to when creating a socket. SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; // Whether to set the *IP_FREEBIND* option when creating the socket. When this // flag is set to true, allows the :ref:`source_address // ` to be an IP address // that is not configured on the system running Envoy. When this flag is set // to false, the option *IP_FREEBIND* is disabled on the socket. When this // flag is not set (default), the socket is not modified, i.e. the option is // neither enabled nor disabled. google.protobuf.BoolValue freebind = 2; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. repeated SocketOption socket_options = 3; } // Addresses specify either a logical or physical address and port, which are // used to tell Envoy where to bind/listen, connect to upstream and find // management servers. message Address { oneof address { option (validate.required) = true; SocketAddress socket_address = 1; Pipe pipe = 2; } } // CidrRange specifies an IP Address and a prefix length to construct // the subnet mask for a `CIDR `_ range. message CidrRange { // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``. string address_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // Length of prefix, e.g. 0, 32. 
google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}]; } ================================================ FILE: api/envoy/api/v2/core/backoff.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "BackoffProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Backoff Strategy] // Configuration defining a jittered exponential back off strategy. message BackoffStrategy { // The base interval to be used for the next back off computation. It should // be greater than zero and less than or equal to :ref:`max_interval // `. google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { required: true gte {nanos: 1000000} }]; // Specifies the maximum interval between retries. This parameter is optional, // but must be greater than or equal to the :ref:`base_interval // ` if set. The default // is 10 times the :ref:`base_interval // `. 
google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; } ================================================ FILE: api/envoy/api/v2/core/base.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/backoff.proto"; import "envoy/api/v2/core/http_uri.proto"; import "envoy/type/percent.proto"; import "envoy/type/semantic_version.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/core/socket_option.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "BaseProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common types] // Envoy supports :ref:`upstream priority routing // ` both at the route and the virtual // cluster level. The current priority implementation uses different connection // pool and circuit breaking settings for each priority level. This means that // even for HTTP/2 requests, two physical connections will be used to an // upstream host. In the future Envoy will likely support true HTTP/2 priority // over a single upstream connection. enum RoutingPriority { DEFAULT = 0; HIGH = 1; } // HTTP request method. enum RequestMethod { METHOD_UNSPECIFIED = 0; GET = 1; HEAD = 2; POST = 3; PUT = 4; DELETE = 5; CONNECT = 6; OPTIONS = 7; TRACE = 8; PATCH = 9; } // Identifies the direction of the traffic relative to the local Envoy. enum TrafficDirection { // Default option is unspecified. UNSPECIFIED = 0; // The transport is used for incoming traffic. 
INBOUND = 1; // The transport is used for outgoing traffic. OUTBOUND = 2; } // Identifies location of where either Envoy runs or where upstream hosts run. message Locality { // Region this :ref:`zone ` belongs to. string region = 1; // Defines the local service zone where Envoy is running. Though optional, it // should be set if discovery service routing is used and the discovery // service exposes :ref:`zone data `, // either in this message or via :option:`--service-zone`. The meaning of zone // is context dependent, e.g. `Availability Zone (AZ) // `_ // on AWS, `Zone `_ on // GCP, etc. string zone = 2; // When used for locality of upstream hosts, this field further splits zone // into smaller chunks of sub-zones so they can be load balanced // independently. string sub_zone = 3; } // BuildVersion combines SemVer version of extension with free-form build information // (i.e. 'alpha', 'private-build') as a set of strings. message BuildVersion { // SemVer version of extension. type.SemanticVersion version = 1; // Free-form build information. // Envoy defines several well known keys in the source/common/version/version.h file google.protobuf.Struct metadata = 2; } // Version and identification for an Envoy extension. // [#next-free-field: 6] message Extension { // This is the name of the Envoy filter as specified in the Envoy // configuration, e.g. envoy.filters.http.router, com.acme.widget. string name = 1; // Category of the extension. // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from // acme.com vendor. // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] string category = 2; // [#not-implemented-hide:] Type descriptor of extension configuration proto. // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] 
string type_descriptor = 3; // The version is a property of the extension and maintained independently // of other extensions and the Envoy API. // This field is not set when extension did not provide version information. BuildVersion version = 4; // Indicates that the extension is present but was disabled via dynamic configuration. bool disabled = 5; } // Identifies a specific Envoy instance. The node identifier is presented to the // management server, which may use this identifier to distinguish per Envoy // configuration for serving. // [#next-free-field: 12] message Node { // An opaque node identifier for the Envoy node. This also provides the local // service node name. It should be set if any of the following features are // used: :ref:`statsd `, :ref:`CDS // `, and :ref:`HTTP tracing // `, either in this message or via // :option:`--service-node`. string id = 1; // Defines the local service cluster name where Envoy is running. Though // optional, it should be set if any of the following features are used: // :ref:`statsd `, :ref:`health check cluster // verification // `, // :ref:`runtime override directory `, // :ref:`user agent addition // `, // :ref:`HTTP global rate limiting `, // :ref:`CDS `, and :ref:`HTTP tracing // `, either in this message or via // :option:`--service-cluster`. string cluster = 2; // Opaque metadata extending the node identifier. Envoy will pass this // directly to the management server. google.protobuf.Struct metadata = 3; // Locality specifying where the Envoy instance is running. Locality locality = 4; // This is motivated by informing a management server during canary which // version of Envoy is being tested in a heterogeneous fleet. This will be set // by Envoy in management server RPCs. // This field is deprecated in favor of the user_agent_name and user_agent_version values. string build_version = 5 [deprecated = true]; // Free-form string that identifies the entity requesting config. // E.g. 
"envoy" or "grpc" string user_agent_name = 6; oneof user_agent_version_type { // Free-form string that identifies the version of the entity requesting config. // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" string user_agent_version = 7; // Structured version of the entity requesting config. BuildVersion user_agent_build_version = 8; } // List of extensions and their versions supported by the node. repeated Extension extensions = 9; // Client feature support list. These are well known features described // in the Envoy API repository for a given major version of an API. Client features // use reverse DNS naming scheme, for example `com.acme.feature`. // See :ref:`the list of features ` that xDS client may // support. repeated string client_features = 10; // Known listening ports on the node as a generic hint to the management server // for filtering :ref:`listeners ` to be returned. For example, // if there is a listener bound to port 80, the list can optionally contain the // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. repeated Address listening_addresses = 11; } // Metadata provides additional inputs to filters based on matched listeners, // filter chains, routes and endpoints. It is structured as a map, usually from // filter name (in reverse DNS format) to metadata specific to the filter. Metadata // key-values for a filter are merged as connection and request handling occurs, // with later values for the same key overriding earlier values. // // An example use of metadata is providing additional values to // http_connection_manager in the envoy.http_connection_manager.access_log // namespace. // // Another example use of metadata is to per service config info in cluster metadata, which may get // consumed by multiple filters. // // For load balancing, Metadata provides a means to subset cluster endpoints. // Endpoints have a Metadata object associated and routes contain a Metadata // object to match against. 
There are some well defined metadata used today for // this purpose: // // * ``{"envoy.lb": {"canary": }}`` This indicates the canary status of an // endpoint and is also used during header processing // (x-envoy-upstream-canary) and for stats purposes. // [#next-major-version: move to type/metadata/v2] message Metadata { // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.* // namespace is reserved for Envoy's built-in filters. map filter_metadata = 1; } // Runtime derived uint32 with a default when not specified. message RuntimeUInt32 { // Default value if runtime value is not available. uint32 default_value = 2; // Runtime key to get value for comparison. This value is used if defined. string runtime_key = 3 [(validate.rules).string = {min_bytes: 1}]; } // Runtime derived double with a default when not specified. message RuntimeDouble { // Default value if runtime value is not available. double default_value = 1; // Runtime key to get value for comparison. This value is used if defined. string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; } // Runtime derived bool with a default when not specified. message RuntimeFeatureFlag { // Default value if runtime value is not available. google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}]; // Runtime key to get value for comparison. This value is used if defined. The boolean value must // be represented via its // `canonical JSON encoding `_. string runtime_key = 2 [(validate.rules).string = {min_bytes: 1}]; } // Header name/value pair. message HeaderValue { // Header name. string key = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Header value. // // The same :ref:`format specifier ` as used for // :ref:`HTTP access logging ` applies here, however // unknown header values are replaced with the empty string instead of `-`. 
string value = 2 [ (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false} ]; } // Header name/value pair plus option to control append behavior. message HeaderValueOption { // Header name/value pair that this option applies to. HeaderValue header = 1 [(validate.rules).message = {required: true}]; // Should the value be appended? If true (default), the value is appended to // existing values. google.protobuf.BoolValue append = 2; } // Wrapper for a set of headers. message HeaderMap { repeated HeaderValue headers = 1; } // Data source consisting of either a file or an inline value. message DataSource { oneof specifier { option (validate.required) = true; // Local filesystem data source. string filename = 1 [(validate.rules).string = {min_bytes: 1}]; // Bytes inlined in the configuration. bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}]; // String inlined in the configuration. string inline_string = 3 [(validate.rules).string = {min_bytes: 1}]; } } // The message specifies the retry policy of remote data source when fetching fails. message RetryPolicy { // Specifies parameters that control :ref:`retry backoff strategy `. // This parameter is optional, in which case the default base interval is 1000 milliseconds. The // default maximum interval is 10 times the base interval. BackoffStrategy retry_back_off = 1; // Specifies the allowed number of retries. This parameter is optional and // defaults to 1. google.protobuf.UInt32Value num_retries = 2; } // The message specifies how to fetch data from remote and how to verify it. message RemoteDataSource { // The HTTP URI to fetch the remote data. HttpUri http_uri = 1 [(validate.rules).message = {required: true}]; // SHA256 string for verifying data. string sha256 = 2 [(validate.rules).string = {min_bytes: 1}]; // Retry policy for fetching remote data. RetryPolicy retry_policy = 3; } // Async data source which supports async data fetch. 
message AsyncDataSource { oneof specifier { option (validate.required) = true; // Local async data source. DataSource local = 1; // Remote async data source. RemoteDataSource remote = 2; } } // Configuration for transport socket in :ref:`listeners ` and // :ref:`clusters `. If the configuration is // empty, a default transport socket implementation and configuration will be // chosen based on the platform and existence of tls_context. message TransportSocket { // The name of the transport socket to instantiate. The name must match a supported transport // socket implementation. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Implementation specific configuration which depends on the implementation being instantiated. // See the supported transport socket implementations for further documentation. oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } // Runtime derived FractionalPercent with defaults for when the numerator or denominator is not // specified via a runtime key. // // .. note:: // // Parsing of the runtime key's data is implemented such that it may be represented as a // :ref:`FractionalPercent ` proto represented as JSON/YAML // and may also be represented as an integer with the assumption that the value is an integral // percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse // as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED. message RuntimeFractionalPercent { // Default value if the runtime value's for the numerator/denominator keys are not available. type.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; // Runtime key for a YAML representation of a FractionalPercent. string runtime_key = 2; } // Identifies a specific ControlPlane instance that Envoy is connected to. 
message ControlPlane { // An opaque control plane identifier that uniquely identifies an instance // of control plane. This can be used to identify which control plane instance, // the Envoy is connected to. string identifier = 1; } ================================================ FILE: api/envoy/api/v2/core/config_source.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "envoy/api/v2/core/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "ConfigSourceProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Configuration sources] // xDS API version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API // versioning. If a client does not support v2 (e.g. due to deprecation), this // is an invalid value. AUTO = 0; // Use xDS v2 API. V2 = 1; // Use xDS v3 API. V3 = 2; } // API configuration source. This identifies the API type and cluster that Envoy // will use to fetch an xDS API. // [#next-free-field: 9] message ApiConfigSource { // APIs may be fetched via either REST or gRPC. enum ApiType { // Ideally this would be 'reserved 0' but one can't reserve the default // value. Instead we throw an exception if this is ever used. UNSUPPORTED_REST_LEGACY = 0 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; // REST-JSON v2 API. The `canonical JSON encoding // `_ for // the v2 protos is used. 
REST = 1; // gRPC v2 API. GRPC = 2; // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response} // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state // with every update, the xDS server only sends what has changed since the last update. DELTA_GRPC = 3; } // API type (gRPC, REST, delta gRPC) ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}]; // API version for xDS transport protocol. This describes the xDS gRPC/REST // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}]; // Cluster names should be used only with REST. If > 1 // cluster is defined, clusters will be cycled through if any kind of failure // occurs. // // .. note:: // // The cluster with name ``cluster_name`` must be statically defined and its // type must not be ``EDS``. repeated string cluster_names = 2; // Multiple gRPC services can be provided for GRPC. If > 1 cluster is defined, // services will be cycled through if any kind of failure occurs. repeated GrpcService grpc_services = 4; // For REST APIs, the delay between successive polls. google.protobuf.Duration refresh_delay = 3; // For REST APIs, the request timeout. If not set, a default value of 1s will be used. google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be // rate limited. RateLimitSettings rate_limit_settings = 6; // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. bool set_node_on_first_message_only = 7; } // Aggregated Discovery Service (ADS) options. This is currently empty, but when // set in :ref:`ConfigSource ` can be used to // specify that ADS is to be used. message AggregatedConfigSource { } // [#not-implemented-hide:] // Self-referencing config source options. 
This is currently empty, but when // set in :ref:`ConfigSource ` can be used to // specify that other data can be obtained from the same server. message SelfConfigSource { // API version for xDS transport protocol. This describes the xDS gRPC/REST // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. message RateLimitSettings { // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a // default value of 100 will be used. google.protobuf.UInt32Value max_tokens = 1; // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens // per second will be used. google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; } // Configuration for :ref:`listeners `, :ref:`clusters // `, :ref:`routes // `, :ref:`endpoints // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. // [#next-free-field: 7] message ConfigSource { oneof config_source_specifier { option (validate.required) = true; // Path on the filesystem to source and watch for configuration updates. // When sourcing configuration for :ref:`secret `, // the certificate and key files are also watched for updates. // // .. note:: // // The path to the source must exist at config load time. // // .. note:: // // Envoy will only watch the file path for *moves.* This is because in general only moves // are atomic. The same method of swapping files as is demonstrated in the // :ref:`runtime documentation ` can be used here also. string path = 1; // API configuration source. ApiConfigSource api_config_source = 2; // When set, ADS will be used to fetch resources. The ADS API configuration // source in the bootstrap configuration is used. 
AggregatedConfigSource ads = 3; // [#not-implemented-hide:] // When set, the client will access the resources from the same server it got the // ConfigSource from, although not necessarily from the same stream. This is similar to the // :ref:`ads` field, except that the client may use a // different stream to the same server. As a result, this field can be used for things // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) // LDS to RDS on the same server without requiring the management server to know its name // or required credentials. // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since // this field can implicitly mean to use the same stream in the case where the ConfigSource // is provided via ADS and the specified data can also be obtained via ADS.] SelfConfigSource self = 5; } // When this timeout is specified, Envoy will wait no longer than the specified time for first // config response on this xDS subscription during the :ref:`initialization process // `. After reaching the timeout, Envoy will move to the next // initialization phase, even if the first config is not delivered yet. The timer is activated // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another // timeout applies). The default is 15s. google.protobuf.Duration initial_fetch_timeout = 4; // API version for xDS resources. This implies the type URLs that the client // will request for resources and the resource type that the client will in // turn expect to be delivered. 
ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/api/v2/core/event_service_config.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "envoy/api/v2/core/grpc_service.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "EventServiceConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#not-implemented-hide:] // Configuration of the event reporting service endpoint. message EventServiceConfig { oneof config_source_specifier { option (validate.required) = true; // Specifies the gRPC service that hosts the event reporting service. GrpcService grpc_service = 1; } } ================================================ FILE: api/envoy/api/v2/core/grpc_method_list.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "GrpcMethodListProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC method list] // A list of gRPC methods which can be used as an allowlist, for example. message GrpcMethodList { message Service { // The name of the gRPC service. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The names of the gRPC methods in this service. 
repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; } repeated Service services = 1; } ================================================ FILE: api/envoy/api/v2/core/grpc_service.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "GrpcServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC services] // gRPC service configuration. This is used by :ref:`ApiConfigSource // ` and filter configurations. // [#next-free-field: 6] message GrpcService { message EnvoyGrpc { // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}]; } // [#next-free-field: 7] message GoogleGrpc { // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. message SslCredentials { // PEM encoded server root certificates. DataSource root_certs = 1; // PEM encoded client private key. DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; // PEM encoded client certificate chain. DataSource cert_chain = 3; } // Local channel credentials. Only UDS is supported for now. // See https://github.com/grpc/grpc/pull/15909. message GoogleLocalCredentials { } // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call // credential types. 
message ChannelCredentials { oneof credential_specifier { option (validate.required) = true; SslCredentials ssl_credentials = 1; // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 google.protobuf.Empty google_default = 2; GoogleLocalCredentials local_credentials = 3; } } // [#next-free-field: 8] message CallCredentials { message ServiceAccountJWTAccessCredentials { string json_key = 1; uint64 token_lifetime_seconds = 2; } message GoogleIAMCredentials { string authorization_token = 1; string authority_selector = 2; } message MetadataCredentialsFromPlugin { string name = 1; oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } // Security token service configuration that allows Google gRPC to // fetch security token from an OAuth 2.0 authorization server. // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and // https://github.com/grpc/grpc/pull/19587. // [#next-free-field: 10] message StsService { // URI of the token exchange service that handles token exchange requests. // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by // https://github.com/envoyproxy/protoc-gen-validate/issues/303] string token_exchange_service_uri = 1; // Location of the target service or resource where the client // intends to use the requested security token. string resource = 2; // Logical name of the target service where the client intends to // use the requested security token. string audience = 3; // The desired scope of the requested security token in the // context of the service or resource where the token will be used. string scope = 4; // Type of the requested security token. string requested_token_type = 5; // The path of subject token, a security token that represents the // identity of the party on behalf of whom the request is being made. string subject_token_path = 6 [(validate.rules).string = {min_bytes: 1}]; // Type of the subject token. 
string subject_token_type = 7 [(validate.rules).string = {min_bytes: 1}]; // The path of actor token, a security token that represents the identity // of the acting party. The acting party is authorized to use the // requested security token and act on behalf of the subject. string actor_token_path = 8; // Type of the actor token. string actor_token_type = 9; } oneof credential_specifier { option (validate.required) = true; // Access token credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. string access_token = 1; // Google Compute Engine credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 google.protobuf.Empty google_compute_engine = 2; // Google refresh token credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. string google_refresh_token = 3; // Service Account JWT Access credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; // Google IAM credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. GoogleIAMCredentials google_iam = 5; // Custom authenticator credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. MetadataCredentialsFromPlugin from_plugin = 6; // Custom security token service which implements OAuth 2.0 token exchange. // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 // See https://github.com/grpc/grpc/pull/19587. StsService sts_service = 7; } } // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. 
string target_uri = 1 [(validate.rules).string = {min_bytes: 1}]; ChannelCredentials channel_credentials = 2; // A set of call credentials that can be composed with `channel credentials // `_. repeated CallCredentials call_credentials = 3; // The human readable prefix to use when emitting statistics for the gRPC // service. // // .. csv-table:: // :header: Name, Type, Description // :widths: 1, 1, 2 // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with string stat_prefix = 4 [(validate.rules).string = {min_bytes: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel // credentials based on other configuration parameters. string credentials_factory_name = 5; // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; } reserved 4; oneof target_specifier { option (validate.required) = true; // Envoy's in-built gRPC client. // See the :ref:`gRPC services overview ` // documentation for discussion on gRPC client selection. EnvoyGrpc envoy_grpc = 1; // `Google C++ gRPC client `_ // See the :ref:`gRPC services overview ` // documentation for discussion on gRPC client selection. GoogleGrpc google_grpc = 2; } // The timeout for the gRPC request. This is the timeout for a specific // request. google.protobuf.Duration timeout = 3; // Additional metadata to include in streams initiated to the GrpcService. // This can be used for scenarios in which additional ad hoc authorization // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. 
repeated HeaderValue initial_metadata = 5; } ================================================ FILE: api/envoy/api/v2/core/health_check.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/event_service_config.proto"; import "envoy/type/http.proto"; import "envoy/type/matcher/string.proto"; import "envoy/type/range.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check] // * Health checking :ref:`architecture overview `. // * If health checking is configured for a cluster, additional statistics are emitted. They are // documented :ref:`here `. // Endpoint health status. enum HealthStatus { // The health status is not known. This is interpreted by Envoy as *HEALTHY*. UNKNOWN = 0; // Healthy. HEALTHY = 1; // Unhealthy. UNHEALTHY = 2; // Connection draining in progress. E.g., // ``_ // or // ``_. // This is interpreted by Envoy as *UNHEALTHY*. DRAINING = 3; // Health check timed out. This is part of HDS and is interpreted by Envoy as // *UNHEALTHY*. TIMEOUT = 4; // Degraded. DEGRADED = 5; } // [#next-free-field: 23] message HealthCheck { // Describes the encoding of the payload bytes in the payload. message Payload { oneof payload { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". 
string text = 1 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; } } // [#next-free-field: 12] message HttpHealthCheck { // The value of the host header in the HTTP health check request. If // left empty (default value), the name of the cluster this health check is associated // with will be used. The host header can be customized for a specific endpoint by setting the // :ref:`hostname ` field. string host = 1; // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. string path = 2 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; // [#not-implemented-hide:] HTTP specific response. Payload receive = 4; // An optional service name parameter which is used to validate the identity of // the health checked cluster. See the :ref:`architecture overview // ` for more information. // // .. attention:: // // This field has been deprecated in favor of `service_name_matcher` for better flexibility // over matching with service-cluster name. string service_name = 5 [deprecated = true]; // Specifies a list of HTTP headers that should be added to each request that is sent to the // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers // `. repeated HeaderValueOption request_headers_to_add = 6 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request that is sent to the // health checked cluster. repeated string request_headers_to_remove = 8; // If set, health checks will be made using http/2. // Deprecated, use :ref:`codec_client_type // ` instead. bool use_http2 = 7 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // Specifies a list of HTTP response statuses considered healthy. 
If provided, replaces default // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open // semantics of :ref:`Int64Range `. The start and end of each // range are required. Only statuses in the range [100, 600) are allowed. repeated type.Int64Range expected_statuses = 9; // Use specified application protocol for health checks. type.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; // An optional service name parameter which is used to validate the identity of // the health checked cluster using a :ref:`StringMatcher // `. See the :ref:`architecture overview // ` for more information. type.matcher.StringMatcher service_name_matcher = 11; } message TcpHealthCheck { // Empty payloads imply a connect-only health check. Payload send = 1; // When checking the response, “fuzzy” matching is performed such that each // binary block must be found, and in the order specified, but not // necessarily contiguous. repeated Payload receive = 2; } message RedisHealthCheck { // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance // by setting the specified key to any value and waiting for traffic to drain. string key = 1; } // `grpc.health.v1.Health // `_-based // healthcheck. See `gRPC doc `_ // for details. message GrpcHealthCheck { // An optional service name parameter which will be sent to gRPC service in // `grpc.health.v1.HealthCheckRequest // `_. // message. See `gRPC health-checking overview // `_ for more information. string service_name = 1; // The value of the :authority header in the gRPC health check request. If // left empty (default value), the name of the cluster this health check is associated // with will be used. 
The authority header can be customized for a specific endpoint by setting // the :ref:`hostname ` field. string authority = 2; } // Custom health check. message CustomHealthCheck { // The registered name of the custom health checker. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } // Health checks occur over the transport socket specified for the cluster. This implies that if a // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. // // This allows overriding the cluster TLS settings, just for health check connections. message TlsOptions { // Specifies the ALPN protocols for health check connections. This is useful if the // corresponding upstream is using ALPN-based :ref:`FilterChainMatch // ` along with different protocols for health checks // versus data connections. If empty, no ALPN protocols will be set on health check connections. repeated string alpn_protocols = 1; } reserved 10; // The time to wait for a health check response. If the timeout is reached the // health check attempt will be considered a failure. google.protobuf.Duration timeout = 1 [(validate.rules).duration = { required: true gt {} }]; // The interval between health checks. google.protobuf.Duration interval = 2 [(validate.rules).duration = { required: true gt {} }]; // An optional jitter amount in milliseconds. If specified, Envoy will start health // checking after for a random time in ms between 0 and initial_jitter. This only // applies to the first health check. google.protobuf.Duration initial_jitter = 20; // An optional jitter amount in milliseconds. If specified, during every // interval Envoy will add interval_jitter to the wait time. 
google.protobuf.Duration interval_jitter = 3; // An optional jitter amount as a percentage of interval_ms. If specified, // during every interval Envoy will add interval_ms * // interval_jitter_percent / 100 to the wait time. // // If interval_jitter_ms and interval_jitter_percent are both set, both of // them will be used to increase the wait time. uint32 interval_jitter_percent = 18; // The number of unhealthy health checks required before a host is marked // unhealthy. Note that for *http* health checking if a host responds with 503 // this threshold is ignored and the host is considered unhealthy immediately. google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; // The number of healthy health checks required before a host is marked // healthy. Note that during startup, only a single successful health check is // required to mark a host healthy. google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Non-serving port for health checking. google.protobuf.UInt32Value alt_port = 6; // Reuse health check connection between health checks. Default is true. google.protobuf.BoolValue reuse_connection = 7; oneof health_checker { option (validate.required) = true; // HTTP health check. HttpHealthCheck http_health_check = 8; // TCP health check. TcpHealthCheck tcp_health_check = 9; // gRPC health check. GrpcHealthCheck grpc_health_check = 11; // Custom health check. CustomHealthCheck custom_health_check = 13; } // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to // date, without sending a potentially large amount of active health checking traffic for no // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the // standard health check interval that is defined. 
Note that this interval takes precedence over // any other. // // The default value for "no traffic interval" is 60 seconds. google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. For subsequent health checks // Envoy will shift back to using either "unhealthy interval" if present or the standard health // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; // Specifies the path to the :ref:`health check event log `. // If empty, no event log will be written. string event_log_path = 17; // [#not-implemented-hide:] // The gRPC service for the health check event service. // If empty, health check events won't be sent to a remote endpoint. EventServiceConfig event_service = 22; // If set to true, health check failure events will always be logged. 
If set to false, only the // initial health check failure event will be logged. // The default value is false. bool always_log_health_check_failures = 19; // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; } ================================================ FILE: api/envoy/api/v2/core/http_uri.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "HttpUriProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Service URI ] // Envoy external URI descriptor message HttpUri { // The HTTP server URI. It should be a full FQDN with protocol, host and path. // // Example: // // .. code-block:: yaml // // uri: https://www.googleapis.com/oauth2/v1/certs // string uri = 1 [(validate.rules).string = {min_bytes: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or // inline DNS resolution. See `issue // `_. oneof http_upstream_type { option (validate.required) = true; // A cluster is created in the Envoy "cluster_manager" config // section. This field specifies the cluster name. // // Example: // // .. code-block:: yaml // // cluster: jwks_cluster // string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
google.protobuf.Duration timeout = 3 [(validate.rules).duration = { required: true gte {} }]; } ================================================ FILE: api/envoy/api/v2/core/protocol.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "ProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Protocol options] // [#not-implemented-hide:] message TcpProtocolOptions { } message UpstreamHttpProtocolOptions { // Set transport socket `SNI `_ for new // upstream connections based on the downstream HTTP host/authority header, as seen by the // :ref:`router filter `. bool auto_sni = 1; // Automatic validate upstream presented certificate for new upstream connections based on the // downstream HTTP host/authority header, as seen by the // :ref:`router filter `. // This field is intended to set with `auto_sni` field. bool auto_san_validation = 2; } // [#next-free-field: 6] message HttpProtocolOptions { // Action to take when Envoy receives client request with header names containing underscore // characters. // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore // characters. enum HeadersWithUnderscoresAction { // Allow headers with underscores. This is the default behavior. ALLOW = 0; // Reject client request. HTTP/1 requests are rejected with the 400 status. 
HTTP/2 requests // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter // is incremented for each rejected request. REJECT_REQUEST = 1; // Drop the header with name containing underscores. The header is dropped before the filter chain is // invoked and as such filters will not see dropped headers. The // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. DROP_HEADER = 2; } // The idle timeout for connections. The idle timeout is defined as the // period in which there are no active requests. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 // downstream connection a drain sequence will occur prior to closing the connection, see // :ref:`drain_timeout // `. // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. // // .. warning:: // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. google.protobuf.Duration idle_timeout = 1; // The maximum duration of a connection. The duration is defined as a period since a connection // was established. If not set, there is no max duration. When max_connection_duration is reached // the connection will be closed. Drain sequence will occur prior to closing the connection if // if's applicable. See :ref:`drain_timeout // `. // Note: not implemented for upstream connections. google.protobuf.Duration max_connection_duration = 3; // The maximum number of headers. If unconfigured, the default // maximum number of request headers allowed is 100. Requests that exceed this limit will receive // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; // Total duration to keep alive an HTTP request/response stream. 
If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. // If this setting is not specified, the value defaults to ALLOW. // Note: upstream responses are not affected by this setting. HeadersWithUnderscoresAction headers_with_underscores_action = 5; } // [#next-free-field: 6] message Http1ProtocolOptions { message HeaderKeyFormat { message ProperCaseWords { } oneof header_format { option (validate.required) = true; // Formats the header by proper casing words: the first character and any character following // a special character will be capitalized if it's an alpha character. For example, // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". // Note that while this results in most headers following conventional casing, certain headers // are not covered. For example, the "TE" header will be formatted as "Te". ProperCaseWords proper_case_words = 1; } } // Handle HTTP requests with absolute URLs in the requests. These requests // are generally sent by clients to forward/explicit proxies. This allows clients to configure // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the // *http_proxy* environment variable. google.protobuf.BoolValue allow_absolute_url = 1; // Handle incoming HTTP/1.0 and HTTP 0.9 requests. // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 // style connect logic, dechunking, and handling lack of client host iff // *default_host_for_http_10* is configured. bool accept_http_10 = 2; // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as // Envoy does not otherwise support HTTP/1.0 without a Host header. // This is a no-op if *accept_http_10* is not true. 
string default_host_for_http_10 = 3;

  // Describes how the keys for response headers should be formatted. By default, all header keys
  // are lower cased.
  HeaderKeyFormat header_key_format = 4;

  // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.
  //
  // .. attention::
  //
  //   Note that this only happens when Envoy is chunk encoding which occurs when:
  //   - The request is HTTP/1.1.
  //   - Is neither a HEAD only request nor a HTTP Upgrade.
  //   - Not a response to a HEAD request.
  //   - The content length header is not present.
  bool enable_trailers = 5;
}

// [#next-free-field: 14]
message Http2ProtocolOptions {
  // Defines a parameter to be sent in the SETTINGS frame.
  // See `RFC7540, sec. 6.5.1 <https://tools.ietf.org/html/rfc7540#section-6.5.1>`_ for details.
  message SettingsParameter {
    // The 16 bit parameter identifier.
    // NOTE: SETTINGS identifiers are 16 bits wide (RFC 7540, sec. 6.5.1), so the
    // largest representable identifier is 65535 (2^16 - 1); the previous bound of
    // 65536 wrongly accepted an out-of-range value.
    google.protobuf.UInt32Value identifier = 1 [
      (validate.rules).uint32 = {lte: 65535 gte: 1},
      (validate.rules).message = {required: true}
    ];

    // The 32 bit parameter value.
    google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}];
  }

  // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_
  // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values
  // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header
  // compression.
  google.protobuf.UInt32Value hpack_table_size = 1;

  // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_
  // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)
  // and defaults to 2147483647.
  //
  // For upstream connections, this also limits how many streams Envoy will initiate concurrently
  // on a single connection. If the limit is reached, Envoy may queue requests or establish
  // additional connections (as allowed per circuit breaker limits).
  google.protobuf.UInt32Value max_concurrent_streams = 2
      [(validate.rules).uint32 = {lte: 2147483647 gte: 1}];

  // `Initial stream-level flow-control window
  // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size.
Valid values range from 65535 // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 // (256 * 1024 * 1024). // // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default // window size now, so it's also the minimum. // // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the codec buffers. google.protobuf.UInt32Value initial_stream_window_size = 3 [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Similar to *initial_stream_window_size*, but for connection-level flow-control // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. google.protobuf.UInt32Value initial_connection_window_size = 4 [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Allows proxying Websocket and other upgrades over H2 connect. bool allow_connect = 5; // [#not-implemented-hide:] Hiding until envoy has full metadata support. // Still under implementation. DO NOT USE. // // Allows metadata. See [metadata // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more // information. bool allow_metadata = 6; // Limit the number of pending outbound downstream frames of all types (frames that are waiting to // be written into the socket). Exceeding this limit triggers flood mitigation and connection is // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due // to flood mitigation. The default limit is 10000. // [#comment:TODO: implement same limits for upstream outbound frames as well.] 
google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, // preventing high memory utilization when receiving continuous stream of these frames. Exceeding // this limit triggers flood mitigation and connection is terminated. The // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood // mitigation. The default limit is 1000. // [#comment:TODO: implement same limits for upstream outbound frames as well.] google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but // might be a result of a broken HTTP/2 implementation. The `http2.inbound_empty_frames_flood`` // stat tracks the number of connections terminated due to flood mitigation. // Setting this to 0 will terminate connection upon receiving first frame with an empty payload // and no end stream flag. The default limit is 1. // [#comment:TODO: implement same limits for upstream inbound frames as well.] google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number // of PRIORITY frames received over the lifetime of connection exceeds the value calculated // using this formula:: // // max_inbound_priority_frames_per_stream * (1 + inbound_streams) // // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks // the number of connections terminated due to flood mitigation. The default limit is 100. // [#comment:TODO: implement same limits for upstream inbound frames as well.] 
google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated // using this formula:: // // 1 + 2 * (inbound_streams + // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) // // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks // the number of connections terminated due to flood mitigation. The default limit is 10. // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, // but more complex implementations that try to estimate available bandwidth require at least 2. // [#comment:TODO: implement same limits for upstream inbound frames as well.] google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 [(validate.rules).uint32 = {gte: 1}]; // Allows invalid HTTP messaging and headers. When this option is disabled (default), then // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // // See `RFC7540, sec. 8.1 `_ for details. bool stream_error_on_invalid_http_messaging = 12; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: // // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by // Envoy. // // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field // 'allow_connect'. // // Note that custom parameters specified through this field can not also be set in the // corresponding named parameters: // // .. 
code-block:: text // // ID Field Name // ---------------- // 0x1 hpack_table_size // 0x3 max_concurrent_streams // 0x4 initial_stream_window_size // // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies // between custom parameters with the same identifier will trigger a failure. // // See `IANA HTTP/2 Settings // `_ for // standardized identifiers. repeated SettingsParameter custom_settings_parameters = 13; } // [#not-implemented-hide:] message GrpcProtocolOptions { Http2ProtocolOptions http2_protocol_options = 1; } ================================================ FILE: api/envoy/api/v2/core/socket_option.proto ================================================ syntax = "proto3"; package envoy.api.v2.core; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.core"; option java_outer_classname = "SocketOptionProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.core.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Socket Option ] // Generic socket option message. This would be used to set socket options that // might not exist in upstream kernels or precompiled Envoy binaries. // [#next-free-field: 7] message SocketOption { enum SocketState { // Socket options are applied after socket creation but before binding the socket to a port STATE_PREBIND = 0; // Socket options are applied after binding the socket to a port but before calling listen() STATE_BOUND = 1; // Socket options are applied after calling listen() STATE_LISTENING = 2; } // An optional name to give this socket option for debugging, etc. // Uniqueness is not required and no special meaning is assumed. 
string description = 1; // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP int64 level = 2; // The numeric name as passed to setsockopt int64 name = 3; oneof value { option (validate.required) = true; // Because many sockopts take an int value. int64 int_value = 4; // Otherwise it's a byte buffer. bytes buf_value = 5; } // The state in which the option will be applied. When used in BindConfig // STATE_PREBIND is currently the only valid value. SocketState state = 6 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/api/v2/discovery.proto ================================================ syntax = "proto3"; package envoy.api.v2; import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.discovery.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common discovery API components] // A DiscoveryRequest requests a set of versioned resources of the same type for // a given Envoy node on some API. // [#next-free-field: 7] message DiscoveryRequest { // The version_info provided in the request messages will be the version_info // received with the most recent successfully processed response or empty on // the first request. It is expected that no new request is sent after a // response is received until the Envoy instance is ready to ACK/NACK the new // configuration. ACK/NACK takes place by returning the new API config version // as applied or the previous API config version respectively. Each type_url // (see below) has an independent version associated with it. 
string version_info = 1; // The node making the request. core.Node node = 2; // List of resources to subscribe to, e.g. list of cluster names or a route // configuration name. If this is empty, all resources for the API are // returned. LDS/CDS may have empty resource_names, which will cause all // resources for the Envoy instance to be returned. The LDS and CDS responses // will then imply a number of resources that need to be fetched via EDS/RDS, // which will be explicitly enumerated in resource_names. repeated string resource_names = 3; // Type of the resource that is being requested, e.g. // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is // required for ADS. string type_url = 4; // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above // discussion on version_info and the DiscoveryResponse nonce comment. This // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, // or 2) the client has not yet accepted an update in this xDS stream (unlike // delta, where it is populated only for new explicit ACKs). string response_nonce = 5; // This is populated when the previous :ref:`DiscoveryResponse ` // failed to update configuration. The *message* field in *error_details* provides the Envoy // internal exception related to the failure. It is only intended for consumption during manual // debugging, the string provided is not guaranteed to be stable across Envoy versions. google.rpc.Status error_detail = 6; } // [#next-free-field: 7] message DiscoveryResponse { // The version of the response data. string version_info = 1; // The response resources. These resources are typed and depend on the API being called. repeated google.protobuf.Any resources = 2; // [#not-implemented-hide:] // Canary is used to support two Envoy command line flags: // // * --terminate-on-canary-transition-failure. 
When set, Envoy is able to // terminate if it detects that configuration is stuck at canary. Consider // this example sequence of updates: // - Management server applies a canary config successfully. // - Management server rolls back to a production config. // - Envoy rejects the new production config. // Since there is no sensible way to continue receiving configuration // updates, Envoy will then terminate and apply production config from a // clean slate. // * --dry-run-canary. When set, a canary response will never be applied, only // validated via a dry run. bool canary = 3; // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). string type_url = 4; // For gRPC based subscriptions, the nonce provides a way to explicitly ack a // specific DiscoveryResponse in a following DiscoveryRequest. Additional // messages may have been sent by Envoy to the management server for the // previous version on the stream prior to this DiscoveryResponse, that were // unprocessed at response send time. The nonce allows the management server // to ignore any further DiscoveryRequests for the previous version until a // DiscoveryRequest bearing the nonce. The nonce is optional and is not // required for non-stream based xDS implementations. string nonce = 5; // [#not-implemented-hide:] // The control plane instance that sent the response. core.ControlPlane control_plane = 6; } // DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC // endpoint for Delta xDS. // // With Delta xDS, the DeltaDiscoveryResponses do not need to include a full // snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a // diff to the state of a xDS client. // In Delta XDS there are per-resource versions, which allow tracking state at // the resource granularity. // An xDS Delta session is always in the context of a gRPC bidirectional // stream. 
This allows the xDS server to keep track of the state of xDS clients // connected to it. // // In Delta xDS the nonce field is required and used to pair // DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. // Optionally, a response message level system_version_info is present for // debugging purposes only. // // DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest // can be either or both of: [1] informing the server of what resources the // client has gained/lost interest in (using resource_names_subscribe and // resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from // the server (using response_nonce, with presence of error_detail making it a NACK). // Additionally, the first message (for a given type_url) of a reconnected gRPC stream // has a third role: informing the server of the resources (and their versions) // that the client already possesses, using the initial_resource_versions field. // // As with state-of-the-world, when multiple resource types are multiplexed (ADS), // all requests/acknowledgments/updates are logically walled off by type_url: // a Cluster ACK exists in a completely separate world from a prior Route NACK. // In particular, initial_resource_versions being sent at the "start" of every // gRPC stream actually entails a message for each type_url, each with its own // initial_resource_versions. // [#next-free-field: 8] message DeltaDiscoveryRequest { // The node making the request. core.Node node = 1; // Type of the resource that is being requested, e.g. // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". string type_url = 2; // DeltaDiscoveryRequests allow the client to add or remove individual // resources to the set of tracked resources in the context of a stream. 
// All resource names in the resource_names_subscribe list are added to the // set of tracked resources and all resource names in the resource_names_unsubscribe // list are removed from the set of tracked resources. // // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or // resource_names_unsubscribe list simply means that no resources are to be // added or removed to the resource list. // *Like* state-of-the-world xDS, the server must send updates for all tracked // resources, but can also send updates for resources the client has not subscribed to. // // NOTE: the server must respond with all resources listed in resource_names_subscribe, // even if it believes the client has the most recent version of them. The reason: // the client may have dropped them, but then regained interest before it had a chance // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. // // These two fields can be set in any DeltaDiscoveryRequest, including ACKs // and initial_resource_versions. // // A list of Resource names to add to the list of tracked resources. repeated string resource_names_subscribe = 3; // A list of Resource names to remove from the list of tracked resources. repeated string resource_names_unsubscribe = 4; // Informs the server of the versions of the resources the xDS client knows of, to enable the // client to continue the same logical xDS session even in the face of gRPC stream reconnection. // It will not be populated: [1] in the very first stream of a session, since the client will // not yet have any resources, [2] in any message after the first in a stream (for a given // type_url), since the server will already be correctly tracking the client's state. // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) // The map's keys are names of xDS resources known to the xDS client. // The map's values are opaque resource versions. 
map initial_resource_versions = 5; // When the DeltaDiscoveryRequest is a ACK or NACK message in response // to a previous DeltaDiscoveryResponse, the response_nonce must be the // nonce in the DeltaDiscoveryResponse. // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. string response_nonce = 6; // This is populated when the previous :ref:`DiscoveryResponse ` // failed to update configuration. The *message* field in *error_details* // provides the Envoy internal exception related to the failure. google.rpc.Status error_detail = 7; } // [#next-free-field: 7] message DeltaDiscoveryResponse { // The version of the response data (used for debugging). string system_version_info = 1; // The response resources. These are typed resources, whose types must match // the type_url field. repeated Resource resources = 2; // field id 3 IS available! // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. string type_url = 4; // Resources names of resources that have be deleted and to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; // The nonce provides a way for DeltaDiscoveryRequests to uniquely // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. string nonce = 5; } message Resource { // The resource's name, to distinguish it from others of the same type of resource. string name = 3; // The aliases are a list of other names that this resource can go by. repeated string aliases = 4; // The resource level version. It allows xDS to track the state of individual // resources. string version = 1; // The resource being tracked. 
google.protobuf.Any resource = 2; } ================================================ FILE: api/envoy/api/v2/eds.proto ================================================ syntax = "proto3"; package envoy.api.v2; import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; import public "envoy/api/v2/endpoint.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.endpoint.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` service EndpointDiscoveryService { option (envoy.annotations.resource).type = "envoy.api.v2.ClusterLoadAssignment"; // The resource_names field in DiscoveryRequest specifies a list of clusters // to subscribe to updates for. rpc StreamEndpoints(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } rpc DeltaEndpoints(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } rpc FetchEndpoints(DiscoveryRequest) returns (DiscoveryResponse) { option (google.api.http).post = "/v2/discovery:endpoints"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. message EdsDummy { } ================================================ FILE: api/envoy/api/v2/endpoint/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/api/v2/endpoint/endpoint.proto ================================================ syntax = "proto3"; package envoy.api.v2.endpoint; import "udpa/annotations/status.proto"; import public "envoy/api/v2/endpoint/endpoint_components.proto"; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; option java_outer_classname = "EndpointProto"; option java_multiple_files = true; ================================================ FILE: api/envoy/api/v2/endpoint/endpoint_components.proto ================================================ syntax = "proto3"; package envoy.api.v2.endpoint; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/health_check.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.endpoint"; option java_outer_classname = "EndpointComponentsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Endpoints] // Upstream host identifier. message Endpoint { // The optional health check configuration. message HealthCheckConfig { // Optional alternative health check port value. // // By default the health check address port of an upstream host is the same // as the host's serving address port. This provides an alternative health // check port. Setting this with a non-zero value allows an upstream host // to have different health check address port. 
uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}]; // By default, the host header for L7 health checks is controlled by cluster level configuration // (see: :ref:`host ` and // :ref:`authority `). Setting this // to a non-empty value allows overriding the cluster level configuration for a specific // endpoint. string hostname = 2; } // The upstream host address. // // .. attention:: // // The form of host address depends on the given cluster type. For STATIC or EDS, // it is expected to be a direct IP address (or something resolvable by the // specified :ref:`resolver ` // in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname, // and will be resolved via DNS. core.Address address = 1; // The optional health check configuration is used as configuration for the // health checker to contact the health checked host. // // .. attention:: // // This takes into effect only for upstream clusters with // :ref:`active health checking ` enabled. HealthCheckConfig health_check_config = 2; // The hostname associated with this endpoint. This hostname is not used for routing or address // resolution. If provided, it will be associated with the endpoint, and can be used for features // that require a hostname, like // :ref:`auto_host_rewrite `. string hostname = 3; } // An Endpoint that Envoy can route traffic to. // [#next-free-field: 6] message LbEndpoint { // Upstream host identifier or a named reference. oneof host_identifier { Endpoint endpoint = 1; // [#not-implemented-hide:] string endpoint_name = 5; } // Optional health status when known and supplied by EDS server. core.HealthStatus health_status = 2; // The endpoint metadata specifies values that may be used by the load // balancer to select endpoints in a cluster for a given request. The filter // name should be specified as *envoy.lb*. An example boolean key-value pair // is *canary*, providing the optional canary status of the upstream host. 
// This may be matched against in a route's
  // :ref:`RouteAction <envoy_api_msg_route.RouteAction>` metadata_match field
  // to subset the endpoints considered in cluster load balancing.
  core.Metadata metadata = 3;

  // The optional load balancing weight of the upstream host; at least 1.
  // Envoy uses the load balancing weight in some of the built in load
  // balancers. The load balancing weight for an endpoint is divided by the sum
  // of the weights of all endpoints in the endpoint's locality to produce a
  // percentage of traffic for the endpoint. This percentage is then further
  // weighted by the endpoint's locality's load balancing weight from
  // LocalityLbEndpoints. If unspecified, each host is presumed to have equal
  // weight in a locality. The sum of the weights of all endpoints in the
  // endpoint's locality must not exceed uint32_t maximal value (4294967295).
  google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}];
}

// A group of endpoints belonging to a Locality.
// One can have multiple LocalityLbEndpoints for a locality, but this is
// generally only done if the different groups need to have different load
// balancing weights or different priorities.
// [#next-free-field: 7]
message LocalityLbEndpoints {
  // Identifies location of where the upstream hosts run.
  core.Locality locality = 1;

  // The group of endpoints belonging to the locality specified.
  repeated LbEndpoint lb_endpoints = 2;

  // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load
  // balancing weight for a locality is divided by the sum of the weights of all
  // localities at the same priority level to produce the effective percentage
  // of traffic for the locality. The sum of the weights of all localities at
  // the same priority level must not exceed uint32_t maximal value (4294967295).
  //
  // Locality weights are only considered when :ref:`locality weighted load
  // balancing <arch_overview_load_balancing_locality_weighted_lb>` is
  // configured. These weights are ignored otherwise. If no weights are
  // specified when locality weighted load balancing is enabled, the locality is
  // assigned no load.
  google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}];

  // Optional: the priority for this LocalityLbEndpoints. If unspecified this will
  // default to the highest priority (0).
  //
  // Under usual circumstances, Envoy will only select endpoints for the highest
  // priority (0). In the event all endpoints for a particular priority are
  // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the
  // next highest priority group.
  //
  // Priorities should range from 0 (highest) to N (lowest) without skipping.
  uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];

  // Optional: Per locality proximity value which indicates how close this
  // locality is from the source locality. This value only provides ordering
  // information (lower the value, closer it is to the source locality).
  // This will be consumed by load balancing schemes that need proximity order
  // to determine where to route the requests.
  // [#not-implemented-hide:]
  google.protobuf.UInt32Value proximity = 6;
}

================================================
FILE: api/envoy/api/v2/endpoint/load_report.proto
================================================
syntax = "proto3";

package envoy.api.v2.endpoint;

import "envoy/api/v2/core/address.proto";
import "envoy/api/v2/core/base.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.api.v2.endpoint";
option java_outer_classname = "LoadReportProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// These are stats Envoy reports to GLB every so often. Report frequency is
// defined by
// :ref:`LoadStatsResponse.load_reporting_interval<envoy_api_field_service.load_stats.v2.LoadStatsResponse.load_reporting_interval>`.
// Stats per upstream region/zone and optionally per subzone.
// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
// [#next-free-field: 9]
message UpstreamLocalityStats {
  // Name of zone, region and optionally endpoint group these metrics were
  // collected from. Zone and region names could be empty if unknown.
  core.Locality locality = 1;

  // The total number of requests successfully completed by the endpoints in the
  // locality.
  uint64 total_successful_requests = 2;

  // The total number of unfinished requests
  uint64 total_requests_in_progress = 3;

  // The total number of requests that failed due to errors at the endpoint,
  // aggregated over all endpoints in the locality.
  uint64 total_error_requests = 4;

  // The total number of requests that were issued by this Envoy since
  // the last report. This information is aggregated over all the
  // upstream endpoints in the locality.
  uint64 total_issued_requests = 8;

  // Stats for multi-dimensional load balancing.
  repeated EndpointLoadMetricStats load_metric_stats = 5;

  // Endpoint granularity stats information for this locality. This information
  // is populated if the Server requests it by setting
  // :ref:`LoadStatsResponse.report_endpoint_granularity<envoy_api_field_service.load_stats.v2.LoadStatsResponse.report_endpoint_granularity>`.
  repeated UpstreamEndpointStats upstream_endpoint_stats = 7;

  // [#not-implemented-hide:] The priority of the endpoint group these metrics
  // were collected from.
  uint32 priority = 6;
}

// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
// [#next-free-field: 8]
message UpstreamEndpointStats {
  // Upstream host address.
  core.Address address = 1;

  // Opaque and implementation dependent metadata of the
  // endpoint. Envoy will pass this directly to the management server.
  google.protobuf.Struct metadata = 6;

  // The total number of requests successfully completed by the endpoints in the
  // locality. These include non-5xx responses for HTTP, where errors
  // originate at the client and the endpoint responded successfully. For gRPC,
  // the grpc-status values are those not covered by total_error_requests below.
  uint64 total_successful_requests = 2;

  // The total number of unfinished requests for this endpoint.
  uint64 total_requests_in_progress = 3;

  // The total number of requests that failed due to errors at the endpoint.
  // For HTTP these are responses with 5xx status codes and for gRPC the
  // grpc-status values:
  //
  //   - DeadlineExceeded
  //   - Unimplemented
  //   - Internal
  //   - Unavailable
  //   - Unknown
  //   - DataLoss
  uint64 total_error_requests = 4;

  // The total number of requests that were issued to this endpoint
  // since the last report. A single TCP connection, HTTP or gRPC
  // request or stream is counted as one request.
  uint64 total_issued_requests = 7;

  // Stats for multi-dimensional load balancing.
  repeated EndpointLoadMetricStats load_metric_stats = 5;
}

// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
message EndpointLoadMetricStats {
  // Name of the metric; may be empty.
  string metric_name = 1;

  // Number of calls that finished and included this metric.
  uint64 num_requests_finished_with_metric = 2;

  // Sum of metric values across all calls that finished with this metric for
  // load_reporting_interval.
  double total_metric_value = 3;
}

// Per cluster load stats. Envoy reports these stats to a management server in a
// :ref:`LoadStatsRequest<envoy_api_msg_service.load_stats.v2.LoadStatsRequest>`
// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
// Next ID: 7
// [#next-free-field: 7]
message ClusterStats {
  message DroppedRequests {
    // Identifier for the policy specifying the drop.
    string category = 1 [(validate.rules).string = {min_bytes: 1}];

    // Total number of deliberately dropped requests for the category.
    uint64 dropped_count = 2;
  }

  // The name of the cluster.
string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];

  // The eds_cluster_config service_name of the cluster.
  // It's possible that two clusters send the same service_name to EDS,
  // in that case, the management server is supposed to do aggregation on the load reports.
  string cluster_service_name = 6;

  // Need at least one.
  repeated UpstreamLocalityStats upstream_locality_stats = 2
      [(validate.rules).repeated = {min_items: 1}];

  // Cluster-level stats such as total_successful_requests may be computed by
  // summing upstream_locality_stats. In addition, below there are additional
  // cluster-wide stats.
  //
  // The total number of dropped requests. This covers requests
  // deliberately dropped by the drop_overload policy and circuit breaking.
  uint64 total_dropped_requests = 3;

  // Information about deliberately dropped requests for each category specified
  // in the DropOverload policy.
  repeated DroppedRequests dropped_requests = 5;

  // Period over which the actual load report occurred. This will be guaranteed to include every
  // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy
  // and the *LoadStatsResponse* message sent from the management server, this may be longer than
  // the requested load reporting interval in the *LoadStatsResponse*.
google.protobuf.Duration load_report_interval = 4;
}

================================================
FILE: api/envoy/api/v2/endpoint.proto
================================================
syntax = "proto3";

package envoy.api.v2;

import "envoy/api/v2/endpoint/endpoint_components.proto";
import "envoy/type/percent.proto";

import "google/api/annotations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.api.v2";
option java_outer_classname = "EndpointProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.endpoint.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Endpoint configuration]
// Endpoint discovery :ref:`architecture overview <arch_overview_service_discovery_types_eds>`

// Each route from RDS will map to a single cluster or traffic split across
// clusters using weights expressed in the RDS WeightedCluster.
//
// With EDS, each cluster is treated independently from a LB perspective, with
// LB taking place between the Localities within a cluster and at a finer
// granularity between the hosts within a locality. The percentage of traffic
// for each endpoint is determined by both its load_balancing_weight, and the
// load_balancing_weight of its locality. First, a locality will be selected,
// then an endpoint within that locality will be chosen based on its weight.
// [#next-free-field: 6]
message ClusterLoadAssignment {
  // Load balancing policy settings.
  // [#next-free-field: 6]
  message Policy {
    // [#not-implemented-hide:]
    message DropOverload {
      // Identifier for the policy specifying the drop.
      string category = 1 [(validate.rules).string = {min_bytes: 1}];

      // Percentage of traffic that should be dropped for the category.
type.FractionalPercent drop_percentage = 2;
    }

    reserved 1;

    // Action to trim the overall incoming traffic to protect the upstream
    // hosts. This action allows protection in case the hosts are unable to
    // recover from an outage, or unable to autoscale or unable to handle
    // incoming traffic volume for any reason.
    //
    // At the client each category is applied one after the other to generate
    // the 'actual' drop percentage on all outgoing traffic. For example:
    //
    // .. code-block:: json
    //
    //  { "drop_overloads": [
    //      { "category": "throttle", "drop_percentage": 60 }
    //      { "category": "lb", "drop_percentage": 50 }
    //  ]}
    //
    // The actual drop percentages applied to the traffic at the clients will be
    //    "throttle"_drop = 60%
    //    "lb"_drop = 20%  // 50% of the remaining 'actual' load, which is 40%.
    //    actual_outgoing_load = 20% // remaining after applying all categories.
    // [#not-implemented-hide:]
    repeated DropOverload drop_overloads = 2;

    // Priority levels and localities are considered overprovisioned with this
    // factor (in percentage). This means that we don't consider a priority
    // level or locality unhealthy until the percentage of healthy hosts
    // multiplied by the overprovisioning factor drops below 100.
    // With the default value 140(1.4), Envoy doesn't consider a priority level
    // or a locality unhealthy until their percentage of healthy hosts drops
    // below 72%. For example:
    //
    // .. code-block:: json
    //
    //  { "overprovisioning_factor": 100 }
    //
    // Read more at :ref:`priority levels <arch_overview_load_balancing_priority_levels>` and
    // :ref:`localities <arch_overview_load_balancing_locality_weighted_lb>`.
    google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}];

    // The max time until which the endpoints from this assignment can be used.
    // If no new assignments are received before this time expires the endpoints
    // are considered stale and should be marked unhealthy.
    // Defaults to 0 which means endpoints never go stale.
google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}];

    // The flag to disable overprovisioning. If it is set to true,
    // :ref:`overprovisioning factor
    // <envoy_api_field_ClusterLoadAssignment.Policy.overprovisioning_factor>` will be ignored
    // and Envoy will not perform graceful failover between priority levels or
    // localities as endpoints become unhealthy. Otherwise Envoy will perform
    // graceful failover as :ref:`overprovisioning factor
    // <envoy_api_field_ClusterLoadAssignment.Policy.overprovisioning_factor>` suggests.
    // [#not-implemented-hide:]
    bool disable_overprovisioning = 5 [deprecated = true];
  }

  // Name of the cluster. This will be the :ref:`service_name
  // <envoy_api_field_Cluster.EdsClusterConfig.service_name>` value if specified
  // in the cluster :ref:`EdsClusterConfig
  // <envoy_api_msg_Cluster.EdsClusterConfig>`.
  string cluster_name = 1 [(validate.rules).string = {min_bytes: 1}];

  // List of endpoints to load balance to.
  repeated endpoint.LocalityLbEndpoints endpoints = 2;

  // Map of named endpoints that can be referenced in LocalityLbEndpoints.
  // [#not-implemented-hide:]
  // NOTE(review): the extraction had stripped the generic here ("map named_endpoints = 5;"),
  // which is not valid proto; restored to the map type declared by the upstream v2 API
  // (endpoint name -> Endpoint).
  map<string, endpoint.Endpoint> named_endpoints = 5;

  // Load balancing policy settings.
  Policy policy = 4;
}

================================================
FILE: api/envoy/api/v2/lds.proto
================================================
syntax = "proto3";

package envoy.api.v2;

import "envoy/api/v2/discovery.proto";

import "google/api/annotations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "envoy/annotations/resource.proto";
import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

import public "envoy/api/v2/listener.proto";

option java_package = "io.envoyproxy.envoy.api.v2";
option java_outer_classname = "LdsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.listener.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Listener]
// Listener :ref:`configuration overview <config_listeners>`

// The Envoy instance initiates an RPC at startup to discover a list of
// listeners. Updates are delivered via streaming from the LDS server and
// consist of a complete update of all listeners. Existing connections will be
// allowed to drain from listeners that are no longer present.
service ListenerDiscoveryService {
  option (envoy.annotations.resource).type = "envoy.api.v2.Listener";

  rpc DeltaListeners(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {
  }

  rpc StreamListeners(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
  }

  rpc FetchListeners(DiscoveryRequest) returns (DiscoveryResponse) {
    option (google.api.http).post = "/v2/discovery:listeners";
    option (google.api.http).body = "*";
  }
}

// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing
// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.
message LdsDummy {
}

================================================
FILE: api/envoy/api/v2/listener/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2/auth:pkg",
        "//envoy/api/v2/core:pkg",
        "//envoy/type:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/api/v2/listener/listener.proto
================================================
syntax = "proto3";

package envoy.api.v2.listener;

import "udpa/annotations/status.proto";

import public "envoy/api/v2/listener/listener_components.proto";

option java_package = "io.envoyproxy.envoy.api.v2.listener";
option java_outer_classname = "ListenerProto";
option java_multiple_files = true;
option csharp_namespace = "Envoy.Api.V2.ListenerNS";
option ruby_package = "Envoy.Api.V2.ListenerNS";

================================================
FILE: api/envoy/api/v2/listener/listener_components.proto
================================================
syntax = "proto3";

package envoy.api.v2.listener;

import "envoy/api/v2/auth/tls.proto";
import "envoy/api/v2/core/address.proto";
import "envoy/api/v2/core/base.proto";
import "envoy/type/range.proto";

import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.api.v2.listener";
option java_outer_classname = "ListenerComponentsProto";
option java_multiple_files = true;
option csharp_namespace = "Envoy.Api.V2.ListenerNS";
option ruby_package = "Envoy.Api.V2.ListenerNS";
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Listener components]
// Listener :ref:`configuration overview <config_listeners>`

message Filter {
  reserved 3;

  // The name of the filter to instantiate. The name must match a
  // :ref:`supported filter <config_network_filters>`.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // Filter specific configuration which depends on the filter being
  // instantiated. See the supported filters for further documentation.
  oneof config_type {
    google.protobuf.Struct config = 2 [deprecated = true];

    google.protobuf.Any typed_config = 4;
  }
}

// Specifies the match criteria for selecting a specific filter chain for a
// listener.
//
// In order for a filter chain to be selected, *ALL* of its criteria must be
// fulfilled by the incoming connection, properties of which are set by the
// networking stack and/or listener filters.
//
// The following order applies:
//
// 1. Destination port.
// 2. Destination IP address.
// 3. Server name (e.g. SNI for TLS protocol),
// 4. Transport protocol.
// 5. Application protocols (e.g. ALPN for TLS protocol).
// 6. Source type (e.g. any, local or external network).
// 7. Source IP address.
// 8. Source port.
//
// For criteria that allow ranges or wildcards, the most specific value in any
// of the configured filter chains that matches the incoming connection is going
// to be used (e.g. for SNI ``www.example.com`` the most specific match would be
// ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter
// chain without ``server_names`` requirements).
//
// [#comment: Implemented rules are kept in the preference order, with deprecated fields
// listed at the end, because that's how we want to list them in the docs.
//
// [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules]
// [#next-free-field: 13]
message FilterChainMatch {
  enum ConnectionSourceType {
    // Any connection source matches.
    ANY = 0;

    // Match a connection originating from the same host.
    LOCAL = 1 [(udpa.annotations.enum_value_migrate).rename = "SAME_IP_OR_LOOPBACK"];

    // Match a connection originating from a different host.
EXTERNAL = 2;
  }

  reserved 1;

  // Optional destination port to consider when use_original_dst is set on the
  // listener in determining a filter chain match.
  google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}];

  // If non-empty, an IP address and prefix length to match addresses when the
  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.
  repeated core.CidrRange prefix_ranges = 3;

  // If non-empty, an IP address and suffix length to match addresses when the
  // listener is bound to 0.0.0.0/:: or when use_original_dst is specified.
  // [#not-implemented-hide:]
  string address_suffix = 4;

  // [#not-implemented-hide:]
  google.protobuf.UInt32Value suffix_len = 5;

  // Specifies the connection source IP match type. Can be any, local or external network.
  ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}];

  // The criteria is satisfied if the source IP address of the downstream
  // connection is contained in at least one of the specified subnets. If the
  // parameter is not specified or the list is empty, the source IP address is
  // ignored.
  repeated core.CidrRange source_prefix_ranges = 6;

  // The criteria is satisfied if the source port of the downstream connection
  // is contained in at least one of the specified ports. If the parameter is
  // not specified, the source port is ignored.
  repeated uint32 source_ports = 7
      [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}];

  // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining
  // a filter chain match. Those values will be compared against the server names of a new
  // connection, when detected by one of the listener filters.
  //
  // The server name will be matched against all wildcard domains, i.e. ``www.example.com``
  // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``.
//
// Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.
//
// .. attention::
//
//   See the :ref:`FAQ entry <faq_how_to_setup_sni>` on how to configure SNI for more
//   information.
repeated string server_names = 11;

// If non-empty, a transport protocol to consider when determining a filter chain match.
// This value will be compared against the transport protocol of a new connection, when
// it's detected by one of the listener filters.
//
// Suggested values include:
//
// * ``raw_buffer`` - default, used when no transport protocol is detected,
// * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`
//   when TLS protocol is detected.
string transport_protocol = 9;

// If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when
// determining a filter chain match. Those values will be compared against the application
// protocols of a new connection, when detected by one of the listener filters.
//
// Suggested values include:
//
// * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector
//   <config_listener_filters_tls_inspector>`,
// * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector <config_listener_filters_tls_inspector>`
//
// .. attention::
//
//   Currently, only :ref:`TLS Inspector <config_listener_filters_tls_inspector>` provides
//   application protocol detection based on the requested
//   `ALPN <https://en.wikipedia.org/wiki/Application-Layer_Protocol_Negotiation>`_ values.
//
//   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,
//   and matching on values other than ``h2`` is going to lead to a lot of false negatives,
//   unless all connecting clients are known to use ALPN.
repeated string application_protocols = 10;
}

// A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and
// various other parameters.
// [#next-free-field: 8]
message FilterChain {
  // The criteria to use when matching a connection to this filter chain.
  FilterChainMatch filter_chain_match = 1;

  // The TLS context for this filter chain.
  //
  // .. attention::
  //
  //   **This field is deprecated**. Use `transport_socket` with name `tls` instead. If both are
  //   set, `transport_socket` takes priority.
  auth.DownstreamTlsContext tls_context = 2 [deprecated = true];

  // A list of individual network filters that make up the filter chain for
  // connections established with the listener. Order matters as the filters are
  // processed sequentially as connection events happen. Note: If the filter
  // list is empty, the connection will close by default.
  repeated Filter filters = 3;

  // Whether the listener should expect a PROXY protocol V1 header on new
  // connections. If this option is enabled, the listener will assume that that
  // remote address of the connection is the one specified in the header. Some
  // load balancers including the AWS ELB support this option. If the option is
  // absent or set to false, Envoy will use the physical peer address of the
  // connection as the remote address.
  google.protobuf.BoolValue use_proxy_proto = 4;

  // [#not-implemented-hide:] filter chain metadata.
  core.Metadata metadata = 5;

  // Optional custom transport socket implementation to use for downstream connections.
  // To setup TLS, set a transport socket with name `tls` and
  // :ref:`DownstreamTlsContext <envoy_api_msg_auth.DownstreamTlsContext>` in the `typed_config`.
  // If no transport socket configuration is specified, new connections
  // will be set up with plaintext.
  core.TransportSocket transport_socket = 6;

  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no
  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter
  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.
  string name = 7;
}

// Listener filter chain match configuration. This is a recursive structure which allows complex
// nested match configurations to be built using various logical operators.
//
// Examples:
//
// * Matches if the destination port is 3306.
//
// .. code-block:: yaml
//
//  destination_port_range:
//   start: 3306
//   end: 3307
//
// * Matches if the destination port is 3306 or 15000.
//
// .. code-block:: yaml
//
//  or_match:
//   rules:
//    - destination_port_range:
//       start: 3306
//       end: 3306
//    - destination_port_range:
//       start: 15000
//       end: 15001
//
// [#next-free-field: 6]
message ListenerFilterChainMatchPredicate {
  // A set of match configurations used for logical operations.
  message MatchSet {
    // The list of rules that make up the set.
    repeated ListenerFilterChainMatchPredicate rules = 1
        [(validate.rules).repeated = {min_items: 2}];
  }

  oneof rule {
    option (validate.required) = true;

    // A set that describes a logical OR. If any member of the set matches, the match configuration
    // matches.
    MatchSet or_match = 1;

    // A set that describes a logical AND. If all members of the set match, the match configuration
    // matches.
    MatchSet and_match = 2;

    // A negation match. The match configuration will match if the negated match condition matches.
    ListenerFilterChainMatchPredicate not_match = 3;

    // The match configuration will always match.
    bool any_match = 4 [(validate.rules).bool = {const: true}];

    // Match destination port. Particularly, the match evaluation must use the recovered local port if
    // the owning listener filter is after :ref:`an original_dst listener filter
    // <config_listener_filters_original_dst>`.
    type.Int32Range destination_port_range = 5;
  }
}

message ListenerFilter {
  // The name of the filter to instantiate. The name must match a
  // :ref:`supported filter <config_listener_filters>`.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // Filter specific configuration which depends on the filter being instantiated.
  // See the supported filters for further documentation.
  oneof config_type {
    google.protobuf.Struct config = 2 [deprecated = true];

    google.protobuf.Any typed_config = 3;
  }

  // Optional match predicate used to disable the filter. The filter is enabled when this field is empty.
  // See :ref:`ListenerFilterChainMatchPredicate <envoy_api_msg_listener.ListenerFilterChainMatchPredicate>`
  // for further examples.
ListenerFilterChainMatchPredicate filter_disabled = 4;
}

================================================
FILE: api/envoy/api/v2/listener/quic_config.proto
================================================
syntax = "proto3";

package envoy.api.v2.listener;

import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.api.v2.listener";
option java_outer_classname = "QuicConfigProto";
option java_multiple_files = true;
option csharp_namespace = "Envoy.Api.V2.ListenerNS";
option ruby_package = "Envoy.Api.V2.ListenerNS";
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: QUIC listener Config]

// Configuration specific to the QUIC protocol.
// Next id: 4
message QuicProtocolOptions {
  // Maximum number of streams that the client can negotiate per connection. 100
  // if not specified.
  google.protobuf.UInt32Value max_concurrent_streams = 1;

  // Maximum number of milliseconds that connection will be alive when there is
  // no network activity. 300000ms if not specified.
  google.protobuf.Duration idle_timeout = 2;

  // Connection timeout in milliseconds before the crypto handshake is finished.
  // 20000ms if not specified.
google.protobuf.Duration crypto_handshake_timeout = 3;
}

================================================
FILE: api/envoy/api/v2/listener/udp_listener_config.proto
================================================
syntax = "proto3";

package envoy.api.v2.listener;

import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.api.v2.listener";
option java_outer_classname = "UdpListenerConfigProto";
option java_multiple_files = true;
option csharp_namespace = "Envoy.Api.V2.ListenerNS";
option ruby_package = "Envoy.Api.V2.ListenerNS";
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: UDP Listener Config]
// Listener :ref:`configuration overview <config_listeners>`

message UdpListenerConfig {
  // Used to look up UDP listener factory, matches "raw_udp_listener" or
  // "quic_listener" to create a specific udp listener.
  // If not specified, treat as "raw_udp_listener".
  string udp_listener_name = 1;

  // Used to create a specific listener factory. To some factory, e.g.
  // "raw_udp_listener", config is not needed.
oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } message ActiveRawUdpListenerConfig { } ================================================ FILE: api/envoy/api/v2/listener.proto ================================================ syntax = "proto3"; package envoy.api.v2; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/socket_option.proto"; import "envoy/api/v2/listener/listener_components.proto"; import "envoy/api/v2/listener/udp_listener_config.proto"; import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/config/listener/v2/api_listener.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` // [#next-free-field: 23] message Listener { enum DrainType { // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check // filter), listener removal/modification, and hot restart. DEFAULT = 0; // Drain in response to listener removal/modification and hot restart. This setting does not // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress // and egress listeners. MODIFY_ONLY = 1; } // [#not-implemented-hide:] message DeprecatedV1 { // Whether the listener should bind to the port. 
A listener that doesn't // bind can only receive connections redirected from other listeners that // set use_original_dst parameter to true. Default is true. // // This is deprecated in v2, all Listeners will bind to their port. An // additional filter chain must be created for every original destination // port this listener may redirect to in v2, with the original port // specified in the FilterChainMatch destination_port field. // // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] google.protobuf.BoolValue bind_to_port = 1; } // Configuration for listener connection balancing. message ConnectionBalanceConfig { // A connection balancer implementation that does exact balancing. This means that a lock is // held during balancing so that connection counts are nearly exactly balanced between worker // threads. This is "nearly" exact in the sense that a connection might close in parallel thus // making the counts incorrect, but this should be rectified on the next accept. This balancer // sacrifices accept throughput for accuracy and should be used when there are a small number of // connections that rarely cycle (e.g., service mesh gRPC egress). message ExactBalance { } oneof balance_type { option (validate.required) = true; // If specified, the listener will use the exact connection balancer. ExactBalance exact_balance = 1; } } reserved 14; // The unique name by which this listener is known. If no name is provided, // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically // updated or removed via :ref:`LDS ` a unique name must be provided. string name = 1; // The address that the listener should listen on. In general, the address must be unique, though // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on // Linux as the actual port will be allocated by the OS. 
core.Address address = 2 [(validate.rules).message = {required: true}]; // A list of filter chains to consider for this listener. The // :ref:`FilterChain ` with the most specific // :ref:`FilterChainMatch ` criteria is used on a // connection. // // Example using SNI for filter chain selection can be found in the // :ref:`FAQ entry `. repeated listener.FilterChain filter_chains = 3; // If a connection is redirected using *iptables*, the port on which the proxy // receives it might be different from the original destination address. When this flag is set to // true, the listener hands off redirected connections to the listener associated with the // original destination address. If there is no listener associated with the original destination // address, the connection is handled by the listener that receives it. Defaults to false. // // .. attention:: // // This field is deprecated. Use :ref:`an original_dst ` // :ref:`listener filter ` instead. // // Note that hand off to another listener is *NOT* performed without this flag. Once // :ref:`FilterChainMatch ` is implemented this flag // will be removed, as filter chain matching can be used to select a filter chain based on the // restored destination address. google.protobuf.BoolValue use_original_dst = 4 [deprecated = true]; // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5; // Listener metadata. core.Metadata metadata = 6; // [#not-implemented-hide:] DeprecatedV1 deprecated_v1 = 7; // The type of draining to perform at a listener-wide level. DrainType drain_type = 8; // Listener filters have the opportunity to manipulate and augment the connection metadata that // is used in connection filter chain matching, for example. These filters are run before any in // :ref:`filter_chains `. 
Order matters as the // filters are processed sequentially right after a socket has been accepted by the listener, and // before a connection is created. // UDP Listener filters can be specified when the protocol in the listener socket address in // :ref:`protocol ` is :ref:`UDP // `. // UDP listeners currently support a single filter. repeated listener.ListenerFilter listener_filters = 9; // The timeout to wait for all listener filters to complete operation. If the timeout is reached, // the accepted socket is closed without a connection being created unless // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the // timeout. If not specified, a default timeout of 15s is used. google.protobuf.Duration listener_filters_timeout = 15; // Whether a connection should be created when listener filters timeout. Default is false. // // .. attention:: // // Some listener filters, such as :ref:`Proxy Protocol filter // `, should not be used with this option. It will cause // unexpected behavior when a connection is created. bool continue_on_listener_filters_timeout = 17; // Whether the listener should be set as a transparent socket. // When this flag is set to true, connections can be redirected to the listener using an // *iptables* *TPROXY* target, in which case the original source and destination addresses and // ports are preserved on accepted connections. This flag should be used in combination with // :ref:`an original_dst ` :ref:`listener filter // ` to mark the connections' local addresses as // "restored." This can be used to hand off each redirected connection to another listener // associated with the connection's destination address. Direct connections to the socket without // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are // therefore treated as if they were redirected. // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. 
// Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. // When this flag is not set (default), the socket is not modified, i.e. the transparent option // is neither set nor reset. google.protobuf.BoolValue transparent = 10; // Whether the listener should set the *IP_FREEBIND* socket option. When this // flag is set to true, listeners can be bound to an IP address that is not // configured on the system running Envoy. When this flag is set to false, the // option *IP_FREEBIND* is disabled on the socket. When this flag is not set // (default), the socket is not modified, i.e. the option is neither enabled // nor disabled. google.protobuf.BoolValue freebind = 11; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. repeated core.SocketOption socket_options = 13; // Whether the listener should accept TCP Fast Open (TFO) connections. // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on // the socket, with a queue length of the specified size // (see `details in RFC7413 `_). // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. // When this flag is not set (default), the socket is not modified, // i.e. the option is neither enabled nor disabled. // // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable // TCP_FASTOPEN. // See `ip-sysctl.txt `_. // // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; // Specifies the intended direction of the traffic relative to the local Envoy. core.TrafficDirection traffic_direction = 16; // If the protocol in the listener socket address in :ref:`protocol // ` is :ref:`UDP // `, this field specifies the actual udp // listener to create, i.e. 
:ref:`udp_listener_name // ` = "raw_udp_listener" for // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". listener.UdpListenerConfig udp_listener_config = 18; // Used to represent an API listener, which is used in non-proxy clients. The type of API // exposed to the non-proxy application depends on the type of API listener. // When this field is set, no other field except for :ref:`name` // should be set. // // .. note:: // // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, // not LDS. // // [#next-major-version: In the v3 API, instead of this messy approach where the socket // listener fields are directly in the top-level Listener message and the API listener types // are in the ApiListener message, the socket listener messages should be in their own message, // and the top-level Listener should essentially be a oneof that selects between the // socket listener and the various types of API listener. That way, a given Listener message // can structurally only contain the fields of the relevant type.] config.listener.v2.ApiListener api_listener = 19; // The listener's connection balancer configuration, currently only applicable to TCP listeners. // If no configuration is specified, Envoy will not attempt to balance active connections between // worker threads. ConnectionBalanceConfig connection_balance_config = 20; // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and // create one socket for each worker thread. This makes inbound connections // distribute among worker threads roughly evenly in cases where there are a high number // of connections. When this flag is set to false, all worker threads share one socket. // // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart // (see `3rd paragraph in 'soreuseport' commit message // `_). 
// This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket // `_. bool reuse_port = 21; // Configuration for :ref:`access logs ` // emitted by this listener. repeated config.filter.accesslog.v2.AccessLog access_log = 22; } ================================================ FILE: api/envoy/api/v2/ratelimit/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/api/v2/ratelimit/ratelimit.proto ================================================ syntax = "proto3"; package envoy.api.v2.ratelimit; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2.ratelimit"; option java_outer_classname = "RatelimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.ratelimit.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common rate limit components] // A RateLimitDescriptor is a list of hierarchical entries that are used by the service to // determine the final rate limit key and overall allowed limit. Here are some examples of how // they might be used for the domain "envoy". // // .. code-block:: cpp // // ["authenticated": "false"], ["remote_address": "10.0.0.1"] // // What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The // configuration supplies a default limit for the *remote_address* key. If there is a desire to // raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the // configuration. // // .. 
// .. code-block:: cpp
//
//   ["authenticated": "false"], ["path": "/foo/bar"]
//
// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if
// configured that way in the service).
//
// .. code-block:: cpp
//
//   ["authenticated": "false"], ["path": "/foo/bar"], ["remote_address": "10.0.0.1"]
//
// What it does: Limits unauthenticated traffic to a specific path for a specific IP address.
// Like (1) we can raise/block specific IP addresses if we want with an override configuration.
//
// .. code-block:: cpp
//
//   ["authenticated": "true"], ["client_id": "foo"]
//
// What it does: Limits all traffic for an authenticated client "foo"
//
// .. code-block:: cpp
//
//   ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"]
//
// What it does: Limits traffic to a specific path for an authenticated client "foo"
//
// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired.
// This enables building complex application scenarios with a generic backend.
message RateLimitDescriptor {
  // A single key/value descriptor entry; both halves must be non-empty.
  message Entry {
    // Descriptor key.
    string key = 1 [(validate.rules).string = {min_bytes: 1}];

    // Descriptor value.
    string value = 2 [(validate.rules).string = {min_bytes: 1}];
  }

  // Descriptor entries. At least one entry is required.
  repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}];
}

================================================
FILE: api/envoy/api/v2/rds.proto
================================================
syntax = "proto3";

package envoy.api.v2;

import "envoy/api/v2/discovery.proto";
import "google/api/annotations.proto";
import "google/protobuf/wrappers.proto";

import "envoy/annotations/resource.proto";
import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

import public "envoy/api/v2/route.proto";

option java_package = "io.envoyproxy.envoy.api.v2";
option java_outer_classname = "RdsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: RDS]

// The resource_names field in DiscoveryRequest specifies a route configuration.
// This allows an Envoy configuration with multiple HTTP listeners (and
// associated HTTP connection manager filters) to use different route
// configurations. Each listener will bind its HTTP connection manager filter to
// a route table via this identifier.
service RouteDiscoveryService {
  option (envoy.annotations.resource).type = "envoy.api.v2.RouteConfiguration";

  rpc StreamRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) {
  }

  rpc DeltaRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {
  }

  rpc FetchRoutes(DiscoveryRequest) returns (DiscoveryResponse) {
    option (google.api.http).post = "/v2/discovery:routes";
    option (google.api.http).body = "*";
  }
}

// Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for
// a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered
// during the processing of an HTTP request if a route for the request cannot be resolved. The
// :ref:`resource_names_subscribe `
// field contains a list of virtual host names or aliases to track. The contents of an alias would
// be the contents of a *host* or *authority* header used to make an http request. An xDS server
// will match an alias to a virtual host based on the content of :ref:`domains'
// ` field. The *resource_names_unsubscribe* field
// contains a list of virtual host names that have been :ref:`unsubscribed
// ` from the routing table associated with the RouteConfiguration.
service VirtualHostDiscoveryService {
  option (envoy.annotations.resource).type = "envoy.api.v2.route.VirtualHost";

  rpc DeltaVirtualHosts(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) {
  }
}

// [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing
// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.
message RdsDummy {
}

================================================
FILE: api/envoy/api/v2/route/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/api/v2/core:pkg",
        "//envoy/type:pkg",
        "//envoy/type/matcher:pkg",
        "//envoy/type/tracing/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/api/v2/route/route.proto
================================================
syntax = "proto3";

package envoy.api.v2.route;

import "udpa/annotations/status.proto";

import public "envoy/api/v2/route/route_components.proto";

option java_package = "io.envoyproxy.envoy.api.v2.route";
option java_outer_classname = "RouteProto";
option java_multiple_files = true;

================================================
FILE: api/envoy/api/v2/route/route_components.proto
================================================
syntax = "proto3";

package envoy.api.v2.route;

import "envoy/api/v2/core/base.proto";
import "envoy/type/matcher/regex.proto";
import "envoy/type/matcher/string.proto";
import "envoy/type/percent.proto";
import "envoy/type/range.proto";
import "envoy/type/tracing/v2/custom_tag.proto";
import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "envoy/annotations/deprecation.proto";
import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.api.v2.route";
option java_outer_classname = "RouteComponentsProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: HTTP route components]
// * Routing :ref:`architecture overview `
// * HTTP :ref:`router filter `

// The top level element in the routing configuration is a virtual host. Each virtual host has
// a logical name as well as a set of domains that get routed to it based on the incoming request's
// host header. This allows a single listener to service multiple top level domain path trees. Once
// a virtual host is selected based on the domain, the routes are processed in order to see which
// upstream cluster to route to or whether to perform a redirect.
// [#next-free-field: 21]
message VirtualHost {
  enum TlsRequirementType {
    // No TLS requirement for the virtual host.
    NONE = 0;

    // External requests must use TLS. If a request is external and it is not
    // using TLS, a 301 redirect will be sent telling the client to use HTTPS.
    EXTERNAL_ONLY = 1;

    // All requests must use TLS. If a request is not using TLS, a 301 redirect
    // will be sent telling the client to use HTTPS.
    ALL = 2;
  }

  reserved 9;

  // The logical name of the virtual host. This is used when emitting certain
  // statistics but is not relevant for routing.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // A list of domains (host/authority header) that will be matched to this
  // virtual host. Wildcard hosts are supported in the suffix or prefix form.
  //
  // Domain search order:
  //  1. Exact domain names: ``www.foo.com``.
  //  2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.
  //  3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.
  //  4. Special wildcard ``*`` matching any domain.
  //
  // .. note::
  //
  //   The wildcard will not match the empty string.
  //   e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.
  //   The longest wildcards match first.
  //   Only a single virtual host in the entire route configuration can match on ``*``. A domain
  //   must be unique across all virtual hosts or the config will fail to load.
  //
  // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE.
  repeated string domains = 2 [(validate.rules).repeated = {
    min_items: 1
    items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}}
  }];

  // The list of routes that will be matched, in order, for incoming requests.
  // The first route that matches will be used.
  repeated Route routes = 3;

  // Specifies the type of TLS enforcement the virtual host expects. If this option is not
  // specified, there is no TLS requirement for the virtual host.
  TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}];

  // A list of virtual clusters defined for this virtual host. Virtual clusters
  // are used for additional statistics gathering.
  repeated VirtualCluster virtual_clusters = 5;

  // Specifies a set of rate limit configurations that will be applied to the
  // virtual host.
  repeated RateLimit rate_limits = 6;

  // Specifies a list of HTTP headers that should be added to each request
  // handled by this virtual host. Headers specified at this level are applied
  // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the
  // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including
  // details on header value syntax, see the documentation on :ref:`custom request headers
  // `.
  repeated core.HeaderValueOption request_headers_to_add = 7
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each request
  // handled by this virtual host.
  repeated string request_headers_to_remove = 13;

  // Specifies a list of HTTP headers that should be added to each response
  // handled by this virtual host. Headers specified at this level are applied
  // after headers from enclosed :ref:`envoy_api_msg_route.Route` and before headers from the
  // enclosing :ref:`envoy_api_msg_RouteConfiguration`. For more information, including
  // details on header value syntax, see the documentation on :ref:`custom request headers
  // `.
  repeated core.HeaderValueOption response_headers_to_add = 10
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each response
  // handled by this virtual host.
  repeated string response_headers_to_remove = 11;

  // Indicates that the virtual host has a CORS policy.
  CorsPolicy cors = 8;

  // The per_filter_config field can be used to provide virtual host-specific
  // configurations for filters. The key should match the filter name, such as
  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter
  // specific; see the :ref:`HTTP filter documentation `
  // for if and how it is utilized.
  // NOTE(review): the map type parameters were lost in extraction; restored to the
  // <string, Struct> signature matching the v3 successor of this frozen field.
  map<string, google.protobuf.Struct> per_filter_config = 12 [deprecated = true];

  // The per_filter_config field can be used to provide virtual host-specific
  // configurations for filters. The key should match the filter name, such as
  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter
  // specific; see the :ref:`HTTP filter documentation `
  // for if and how it is utilized.
  // NOTE(review): map type parameters restored as above (<string, Any>).
  map<string, google.protobuf.Any> typed_per_filter_config = 15;

  // Decides whether the :ref:`x-envoy-attempt-count
  // ` header should be included
  // in the upstream request. Setting this option will cause it to override any existing header
  // value, so in the case of two Envoys on the request path with this option enabled, the upstream
  // will see the attempt count as perceived by the second Envoy. Defaults to false.
  // This header is unaffected by the
  // :ref:`suppress_envoy_headers
  // ` flag.
  //
  // [#next-major-version: rename to include_attempt_count_in_request.]
  bool include_request_attempt_count = 14;

  // Decides whether the :ref:`x-envoy-attempt-count
  // ` header should be included
  // in the downstream response. Setting this option will cause the router to override any existing header
  // value, so in the case of two Envoys on the request path with this option enabled, the downstream
  // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.
  // This header is unaffected by the
  // :ref:`suppress_envoy_headers
  // ` flag.
  bool include_attempt_count_in_response = 19;

  // Indicates the retry policy for all routes in this virtual host. Note that setting a
  // route level entry will take precedence over this config and it'll be treated
  // independently (e.g.: values are not inherited).
  RetryPolicy retry_policy = 16;

  // [#not-implemented-hide:]
  // Specifies the configuration for retry policy extension. Note that setting a route level entry
  // will take precedence over this config and it'll be treated independently (e.g.: values are not
  // inherited). :ref:`Retry policy ` should not be
  // set if this field is used.
  google.protobuf.Any retry_policy_typed_config = 20;

  // Indicates the hedge policy for all routes in this virtual host. Note that setting a
  // route level entry will take precedence over this config and it'll be treated
  // independently (e.g.: values are not inherited).
  HedgePolicy hedge_policy = 17;

  // The maximum bytes which will be buffered for retries and shadowing.
  // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum
  // value of this and the listener per_connection_buffer_limit_bytes.
  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18;
}

// A filter-defined action type.
message FilterAction {
  google.protobuf.Any action = 1;
}

// A route is both a specification of how to match a request as well as an indication of what to do
// next (e.g., redirect, forward, rewrite, etc.).
//
// .. attention::
//
//   Envoy supports routing on HTTP method via :ref:`header matching
//   `.
// [#next-free-field: 18]
message Route {
  reserved 6;

  // Name for the route.
  string name = 14;

  // Route matching parameters.
  RouteMatch match = 1 [(validate.rules).message = {required: true}];

  oneof action {
    option (validate.required) = true;

    // Route request to some upstream cluster.
    RouteAction route = 2;

    // Return a redirect.
    RedirectAction redirect = 3;

    // Return an arbitrary HTTP response directly, without proxying.
    DirectResponseAction direct_response = 7;

    // [#not-implemented-hide:]
    // If true, a filter will define the action (e.g., it could dynamically generate the
    // RouteAction).
    FilterAction filter_action = 17;
  }

  // The Metadata field can be used to provide additional information
  // about the route. It can be used for configuration, stats, and logging.
  // The metadata should go under the filter namespace that will need it.
  // For instance, if the metadata is intended for the Router filter,
  // the filter name should be specified as *envoy.filters.http.router*.
  core.Metadata metadata = 4;

  // Decorator for the matched route.
  Decorator decorator = 5;

  // The per_filter_config field can be used to provide route-specific
  // configurations for filters. The key should match the filter name, such as
  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter
  // specific; see the :ref:`HTTP filter documentation ` for
  // if and how it is utilized.
  // NOTE(review): map type parameters restored (lost in extraction).
  map<string, google.protobuf.Struct> per_filter_config = 8 [deprecated = true];

  // The typed_per_filter_config field can be used to provide route-specific
  // configurations for filters. The key should match the filter name, such as
  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter
  // specific; see the :ref:`HTTP filter documentation ` for
  // if and how it is utilized.
  // NOTE(review): map type parameters restored (lost in extraction).
  map<string, google.protobuf.Any> typed_per_filter_config = 13;

  // Specifies a set of headers that will be added to requests matching this
  // route. Headers specified at this level are applied before headers from the
  // enclosing :ref:`envoy_api_msg_route.VirtualHost` and
  // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on
  // header value syntax, see the documentation on :ref:`custom request headers
  // `.
  repeated core.HeaderValueOption request_headers_to_add = 9
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each request
  // matching this route.
  repeated string request_headers_to_remove = 12;

  // Specifies a set of headers that will be added to responses to requests
  // matching this route. Headers specified at this level are applied before
  // headers from the enclosing :ref:`envoy_api_msg_route.VirtualHost` and
  // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including
  // details on header value syntax, see the documentation on
  // :ref:`custom request headers `.
  repeated core.HeaderValueOption response_headers_to_add = 10
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each response
  // to requests matching this route.
  repeated string response_headers_to_remove = 11;

  // Presence of the object defines whether the connection manager's tracing configuration
  // is overridden by this route specific instance.
  Tracing tracing = 15;

  // The maximum bytes which will be buffered for retries and shadowing.
  // If set, the bytes actually buffered will be the minimum value of this and the
  // listener per_connection_buffer_limit_bytes.
  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16;
}

// Compared to the :ref:`cluster ` field that specifies a
// single upstream cluster as the target of a request, the :ref:`weighted_clusters
// ` option allows for specification of
// multiple upstream clusters along with weights that indicate the percentage of
// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the
// weights.
message WeightedCluster {
  // [#next-free-field: 11]
  message ClusterWeight {
    reserved 7;

    // Name of the upstream cluster.
The cluster must exist in the // :ref:`cluster manager configuration `. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, // the choice of an upstream cluster is determined by its weight. The sum of weights across all // entries in the clusters array must add up to the total_weight, which defaults to 100. google.protobuf.UInt32Value weight = 2; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field will be considered for // load balancing. Note that this will be merged with what's provided in // :ref:`RouteAction.metadata_match `, with // values here taking precedence. The filter name should be specified as *envoy.lb*. core.Metadata metadata_match = 3; // Specifies a list of headers to be added to requests when this cluster is selected // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. // Headers specified at this level are applied before headers from the enclosing // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 4 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_route.RouteAction`. repeated string request_headers_to_remove = 9; // Specifies a list of headers to be added to responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. 
// Headers specified at this level are applied before headers from the enclosing // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_route.VirtualHost`, and // :ref:`envoy_api_msg_RouteConfiguration`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 5 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_route.RouteAction`. repeated string response_headers_to_remove = 6; // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` // for if and how it is utilized. map per_filter_config = 8 [deprecated = true]; // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. The key should match the filter name, such as // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` // for if and how it is utilized. map typed_per_filter_config = 10; } // Specifies one or more upstream clusters associated with the route. repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; // Specifies the total weight across all clusters. The sum of all cluster weights must equal this // value, which must be greater than 0. Defaults to 100. google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}]; // Specifies the runtime key prefix that should be used to construct the // runtime keys associated with each cluster. 
// When the *runtime_key_prefix* is
  // specified, the router will look for weights associated with each upstream
  // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where
  // *cluster[i]* denotes an entry in the clusters array field. If the runtime
  // key for the cluster does not exist, the value specified in the
  // configuration file will be used as the default weight. See the :ref:`runtime documentation
  // <operations_runtime>` for how key names map to the underlying implementation.
  string runtime_key_prefix = 2;
}

// [#next-free-field: 12]
message RouteMatch {
  // Carries no settings; presence of the message is what matters — see the
  // *grpc* field below, which uses it to restrict the route to gRPC requests.
  message GrpcRouteMatchOptions {
  }

  // Criteria for matching properties of the downstream TLS context.
  message TlsContextMatchOptions {
    // If specified, the route will match against whether or not a certificate is presented.
    // If not specified, certificate presentation status (true or false) will not be considered when route matching.
    google.protobuf.BoolValue presented = 1;

    // If specified, the route will match against whether or not a certificate is validated.
    // If not specified, certificate validation status (true or false) will not be considered when route matching.
    google.protobuf.BoolValue validated = 2;
  }

  reserved 5;

  oneof path_specifier {
    option (validate.required) = true;

    // If specified, the route is a prefix rule meaning that the prefix must
    // match the beginning of the *:path* header.
    string prefix = 1;

    // If specified, the route is an exact path rule meaning that the path must
    // exactly match the *:path* header once the query string is removed.
    string path = 2;

    // If specified, the route is a regular expression rule meaning that the
    // regex must match the *:path* header once the query string is removed. The entire path
    // (without the query string) must match the regex. The rule will not match if only a
    // subsequence of the *:path* header matches the regex. The regex grammar is defined `here
    // <https://en.cppreference.com/w/cpp/regex/ecmascript>`_.
    //
    // Examples:
    //
    // * The regex ``/b[io]t`` matches the path */bit*
    // * The regex ``/b[io]t`` matches the path */bot*
    // * The regex ``/b[io]t`` does not match the path */bite*
    // * The regex ``/b[io]t`` does not match the path */bit/bot*
    //
    // .. attention::
    //   This field has been deprecated in favor of `safe_regex` as it is not safe for use with
    //   untrusted input in all cases.
    string regex = 3 [
      deprecated = true,
      (validate.rules).string = {max_bytes: 1024},
      (envoy.annotations.disallowed_by_default) = true
    ];

    // If specified, the route is a regular expression rule meaning that the
    // regex must match the *:path* header once the query string is removed. The entire path
    // (without the query string) must match the regex. The rule will not match if only a
    // subsequence of the *:path* header matches the regex.
    //
    // [#next-major-version: In the v3 API we should redo how path specification works such
    // that we utilize StringMatcher, and additionally have consistent options around whether we
    // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive
    // to deprecate the existing options. We should even consider whether we want to do away with
    // path_specifier entirely and just rely on a set of header matchers which can already match
    // on :path, etc. The issue with that is it is unclear how to generically deal with query string
    // stripping. This needs more thought.]
    type.matcher.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];
  }

  // Indicates that prefix/path matching should be case sensitive. The default
  // is true.
  google.protobuf.BoolValue case_sensitive = 4;

  // Indicates that the route should additionally match on a runtime key. Every time the route
  // is considered for a match, it must also fall under the percentage of matches indicated by
  // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the
  // number is <= the value of the numerator N, or if the key is not present, the default
  // value, the router continues to evaluate the remaining match criteria. A runtime_fraction
  // route configuration can be used to roll out route changes in a gradual manner without full
  // code/config deploys. Refer to the :ref:`traffic shifting
  // <config_http_conn_man_route_table_traffic_splitting_shift>` docs for additional documentation.
  //
  // .. note::
  //
  //    Parsing this field is implemented such that the runtime key's data may be represented
  //    as a FractionalPercent proto represented as JSON/YAML and may also be represented as an
  //    integer with the assumption that the value is an integral percentage out of 100. For
  //    instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent
  //    whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics.
  core.RuntimeFractionalPercent runtime_fraction = 9;

  // Specifies a set of headers that the route should match on. The router will
  // check the request's headers against all the specified headers in the route
  // config. A match will happen if all the headers in the route are present in
  // the request with the same values (or based on presence if the value field
  // is not in the config).
  repeated HeaderMatcher headers = 6;

  // Specifies a set of URL query parameters on which the route should
  // match. The router will check the query string from the *path* header
  // against all the specified query parameters. If the number of specified
  // query parameters is nonzero, they all must match the *path* header's
  // query string for a match to occur.
  repeated QueryParameterMatcher query_parameters = 7;

  // If specified, only gRPC requests will be matched. The router will check
  // that the content-type header has a application/grpc or one of the various
  // application/grpc+ values.
  GrpcRouteMatchOptions grpc = 8;

  // If specified, the client tls context will be matched against the defined
  // match options.
  //
  // [#next-major-version: unify with RBAC]
  TlsContextMatchOptions tls_context = 11;
}

// [#next-free-field: 12]
message CorsPolicy {
  // Specifies the origins that will be allowed to do CORS requests.
  //
  // An origin is allowed if either allow_origin or allow_origin_regex match.
  //
  // .. attention::
  //   This field has been deprecated in favor of `allow_origin_string_match`.
  repeated string allow_origin = 1
      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];

  // Specifies regex patterns that match allowed origins.
  //
  // An origin is allowed if either allow_origin or allow_origin_regex match.
  //
  // .. attention::
  //   This field has been deprecated in favor of `allow_origin_string_match` as it is not safe for
  //   use with untrusted input in all cases.
  repeated string allow_origin_regex = 8
      [deprecated = true, (validate.rules).repeated = {items {string {max_bytes: 1024}}}];

  // Specifies string patterns that match allowed origins. An origin is allowed if any of the
  // string matchers match.
  repeated type.matcher.StringMatcher allow_origin_string_match = 11;

  // Specifies the content for the *access-control-allow-methods* header.
  string allow_methods = 2;

  // Specifies the content for the *access-control-allow-headers* header.
  string allow_headers = 3;

  // Specifies the content for the *access-control-expose-headers* header.
  string expose_headers = 4;

  // Specifies the content for the *access-control-max-age* header.
  string max_age = 5;

  // Specifies whether the resource allows credentials.
  google.protobuf.BoolValue allow_credentials = 6;

  oneof enabled_specifier {
    // Specifies if the CORS filter is enabled. Defaults to true. Only effective on route.
    //
    // .. attention::
    //
    //   **This field is deprecated**. Set the
    //   :ref:`filter_enabled <envoy_api_field_route.CorsPolicy.filter_enabled>` field instead.
    google.protobuf.BoolValue enabled = 7
        [deprecated = true, (envoy.annotations.disallowed_by_default) = true];

    // Specifies the % of requests for which the CORS filter is enabled.
//
    // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS
    // filter will be enabled for 100% of the requests.
    //
    // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is
    // specified, Envoy will lookup the runtime key to get the percentage of requests to filter.
    core.RuntimeFractionalPercent filter_enabled = 9;
  }

  // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not
  // enforced.
  //
  // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those
  // fields have to explicitly disable the filter in order for this setting to take effect.
  //
  // If :ref:`runtime_key <envoy_api_field_core.RuntimeFractionalPercent.runtime_key>` is specified,
  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate
  // and track the request's *Origin* to determine if it's valid but will not enforce any policies.
  core.RuntimeFractionalPercent shadow_enabled = 10;
}

// [#next-free-field: 34]
message RouteAction {
  enum ClusterNotFoundResponseCode {
    // HTTP status code - 503 Service Unavailable.
    SERVICE_UNAVAILABLE = 0;

    // HTTP status code - 404 Not Found.
    NOT_FOUND = 1;
  }

  // Configures :ref:`internal redirect <arch_overview_internal_redirects>` behavior.
  enum InternalRedirectAction {
    PASS_THROUGH_INTERNAL_REDIRECT = 0;
    HANDLE_INTERNAL_REDIRECT = 1;
  }

  // The router is capable of shadowing traffic from one cluster to another. The current
  // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to
  // respond before returning the response from the primary cluster. All normal statistics are
  // collected for the shadow cluster making this feature useful for testing.
  //
  // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is
  // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.
  //
  // .. note::
  //
  //   Shadowing will not be triggered if the primary cluster does not exist.
  message RequestMirrorPolicy {
    // Specifies the cluster that requests will be mirrored to. The cluster must
    // exist in the cluster manager configuration.
    string cluster = 1 [(validate.rules).string = {min_bytes: 1}];

    // If not specified, all requests to the target cluster will be mirrored. If
    // specified, Envoy will lookup the runtime key to get the % of requests to
    // mirror. Valid values are from 0 to 10000, allowing for increments of
    // 0.01% of requests to be mirrored. If the runtime key is specified in the
    // configuration but not present in runtime, 0 is the default and thus 0% of
    // requests will be mirrored.
    //
    // .. attention::
    //
    //   **This field is deprecated**. Set the
    //   :ref:`runtime_fraction
    //   <envoy_api_field_route.RouteAction.RequestMirrorPolicy.runtime_fraction>`
    //   field instead. Mirroring occurs if both this and
    //   :ref:`runtime_fraction
    //   <envoy_api_field_route.RouteAction.RequestMirrorPolicy.runtime_fraction>`
    //   are not set.
    string runtime_key = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true];

    // If not specified, all requests to the target cluster will be mirrored.
    //
    // If specified, this field takes precedence over the `runtime_key` field and requests must also
    // fall under the percentage of matches indicated by this field.
    //
    // For some fraction N/D, a random number in the range [0,D) is selected. If the
    // number is <= the value of the numerator N, or if the key is not present, the default
    // value, the request will be mirrored.
    core.RuntimeFractionalPercent runtime_fraction = 3;

    // Determines if the trace span should be sampled. Defaults to true.
    google.protobuf.BoolValue trace_sampled = 4;
  }

  // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer
  // <arch_overview_load_balancing_types>`.
  // [#next-free-field: 7]
  message HashPolicy {
    message Header {
      // The name of the request header that will be used to obtain the hash
      // key. If the request header is not present, no hash will be produced.
      string header_name = 1 [
        (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}
      ];
    }

    // Envoy supports two types of cookie affinity:
    //
    // 1. Passive. Envoy takes a cookie that's present in the cookies header and
    //    hashes on its value.
//
    // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL)
    //    on the first request from the client in its response to the client,
    //    based on the endpoint the request gets sent to. The client then
    //    presents this on the next and all subsequent requests. The hash of
    //    this is sufficient to ensure these requests get sent to the same
    //    endpoint. The cookie is generated by hashing the source and
    //    destination ports and addresses so that multiple independent HTTP2
    //    streams on the same connection will independently receive the same
    //    cookie, even if they arrive at the Envoy simultaneously.
    message Cookie {
      // The name of the cookie that will be used to obtain the hash key. If the
      // cookie is not present and ttl below is not set, no hash will be
      // produced.
      string name = 1 [(validate.rules).string = {min_bytes: 1}];

      // If specified, a cookie with the TTL will be generated if the cookie is
      // not present. If the TTL is present and zero, the generated cookie will
      // be a session cookie.
      google.protobuf.Duration ttl = 2;

      // The name of the path for the cookie. If no path is specified here, no path
      // will be set for the cookie.
      string path = 3;
    }

    message ConnectionProperties {
      // Hash on source IP address.
      bool source_ip = 1;
    }

    message QueryParameter {
      // The name of the URL query parameter that will be used to obtain the hash
      // key. If the parameter is not present, no hash will be produced. Query
      // parameter names are case-sensitive.
      string name = 1 [(validate.rules).string = {min_bytes: 1}];
    }

    message FilterState {
      // The name of the Object in the per-request filterState, which is an
      // Envoy::Http::Hashable object. If there is no data associated with the key,
      // or the stored object is not Envoy::Http::Hashable, no hash will be produced.
      string key = 1 [(validate.rules).string = {min_bytes: 1}];
    }

    oneof policy_specifier {
      option (validate.required) = true;

      // Header hash policy.
      Header header = 1;

      // Cookie hash policy.
      Cookie cookie = 2;

      // Connection properties hash policy.
      ConnectionProperties connection_properties = 3;

      // Query parameter hash policy.
      QueryParameter query_parameter = 5;

      // Filter state hash policy.
      FilterState filter_state = 6;
    }

    // The flag that short-circuits the hash computing. This field provides a
    // 'fallback' style of configuration: "if a terminal policy doesn't work,
    // fallback to rest of the policy list", it saves time when the terminal
    // policy works.
    //
    // If true, and there is already a hash computed, ignore rest of the
    // list of hash polices.
    // For example, if the following hash methods are configured:
    //
    //  ========= ========
    //  specifier terminal
    //  ========= ========
    //  Header A  true
    //  Header B  false
    //  Header C  false
    //  ========= ========
    //
    // The generateHash process ends if policy "header A" generates a hash, as
    // it's a terminal policy.
    bool terminal = 4;
  }

  // Allows enabling and disabling upgrades on a per-route basis.
  // This overrides any enabled/disabled upgrade filter chain specified in the
  // HttpConnectionManager
  // :ref:`upgrade_configs
  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.upgrade_configs>`
  // but does not affect any custom filter chain specified there.
  message UpgradeConfig {
    // The case-insensitive name of this upgrade, e.g. "websocket".
    // For each upgrade type present in upgrade_configs, requests with
    // Upgrade: [upgrade_type] will be proxied upstream.
    string upgrade_type = 1
        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

    // Determines if upgrades are available on this route. Defaults to true.
    google.protobuf.BoolValue enabled = 2;
  }

  reserved 12, 18, 19, 16, 22, 21;

  oneof cluster_specifier {
    option (validate.required) = true;

    // Indicates the upstream cluster to which the request should be routed
    // to.
    string cluster = 1 [(validate.rules).string = {min_bytes: 1}];

    // Envoy will determine the cluster to route to by reading the value of the
    // HTTP header named by cluster_header from the request headers.
// If the
    // header is not found or the referenced cluster does not exist, Envoy will
    // return a 404 response.
    //
    // .. attention::
    //
    //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1
    //   *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.
    string cluster_header = 2
        [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

    // Multiple upstream clusters can be specified for a given route. The
    // request is routed to one of the upstream clusters based on weights
    // assigned to each cluster. See
    // :ref:`traffic splitting <config_http_conn_man_route_table_traffic_splitting_split>`
    // for additional documentation.
    WeightedCluster weighted_clusters = 3;
  }

  // The HTTP status code to use when configured cluster is not found.
  // The default response code is 503 Service Unavailable.
  ClusterNotFoundResponseCode cluster_not_found_response_code = 20
      [(validate.rules).enum = {defined_only: true}];

  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints
  // in the upstream cluster with metadata matching what's set in this field will be considered
  // for load balancing. If using :ref:`weighted_clusters
  // <envoy_api_field_route.RouteAction.weighted_clusters>`, metadata will be merged, with values
  // provided there taking precedence. The filter name should be specified as *envoy.lb*.
  core.Metadata metadata_match = 4;

  // Indicates that during forwarding, the matched prefix (or path) should be
  // swapped with this value. This option allows application URLs to be rooted
  // at a different path from those exposed at the reverse proxy layer. The router filter will
  // place the original path before rewrite into the :ref:`x-envoy-original-path
  // <config_http_filters_router_x-envoy-original-path>` header.
  //
  // Only one of *prefix_rewrite* or
  // :ref:`regex_rewrite <envoy_api_field_route.RouteAction.regex_rewrite>`
  // may be specified.
  //
  // .. attention::
  //
  //   Pay careful attention to the use of trailing slashes in the
  //   :ref:`route's match <envoy_api_field_route.Route.match>` prefix value.
  //   Stripping a prefix from a path requires multiple Routes to handle all cases. For example,
  //   rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single
  //   :ref:`Route <envoy_api_msg_route.Route>`, as shown by the below config entries:
  //
  //   .. code-block:: yaml
  //
  //     - match:
  //         prefix: "/prefix/"
  //       route:
  //         prefix_rewrite: "/"
  //     - match:
  //         prefix: "/prefix"
  //       route:
  //         prefix_rewrite: "/"
  //
  //   Having above entries in the config, requests to */prefix* will be stripped to */*, while
  //   requests to */prefix/etc* will be stripped to */etc*.
  string prefix_rewrite = 5
      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

  // Indicates that during forwarding, portions of the path that match the
  // pattern should be rewritten, even allowing the substitution of capture
  // groups from the pattern into the new path as specified by the rewrite
  // substitution string. This is useful to allow application paths to be
  // rewritten in a way that is aware of segments with variable content like
  // identifiers. The router filter will place the original path as it was
  // before the rewrite into the :ref:`x-envoy-original-path
  // <config_http_filters_router_x-envoy-original-path>` header.
  //
  // Only one of :ref:`prefix_rewrite <envoy_api_field_route.RouteAction.prefix_rewrite>`
  // or *regex_rewrite* may be specified.
  //
  // Examples using Google's `RE2 <https://github.com/google/re2>`_ engine:
  //
  // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution
  //   string of ``\2/instance/\1`` would transform ``/service/foo/v1/api``
  //   into ``/v1/api/instance/foo``.
  //
  // * The pattern ``one`` paired with a substitution string of ``two`` would
  //   transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``.
  //
  // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of
  //   ``\1two\2`` would replace only the first occurrence of ``one``,
  //   transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``.
  //
  // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/``
  //   would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to
  //   ``/aaa/yyy/bbb``.
type.matcher.RegexMatchAndSubstitute regex_rewrite = 32;

  oneof host_rewrite_specifier {
    // Indicates that during forwarding, the host header will be swapped with
    // this value.
    string host_rewrite = 6 [
      (validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false},
      (udpa.annotations.field_migrate).rename = "host_rewrite_literal"
    ];

    // Indicates that during forwarding, the host header will be swapped with
    // the hostname of the upstream host chosen by the cluster manager. This
    // option is applicable only when the destination cluster for a route is of
    // type *strict_dns* or *logical_dns*. Setting this to true with other cluster
    // types has no effect.
    google.protobuf.BoolValue auto_host_rewrite = 7;

    // Indicates that during forwarding, the host header will be swapped with the content of given
    // downstream or :ref:`custom <config_http_conn_man_headers_custom_request_headers>` header.
    // If header value is empty, host header is left intact.
    //
    // .. attention::
    //
    //   Pay attention to the potential security implications of using this option. Provided header
    //   must come from trusted source.
    string auto_host_rewrite_header = 29 [
      (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false},
      (udpa.annotations.field_migrate).rename = "host_rewrite_header"
    ];
  }

  // Specifies the upstream timeout for the route. If not specified, the default is 15s. This
  // spans between the point at which the entire downstream request (i.e. end-of-stream) has been
  // processed and when the upstream response has been completely processed. A value of 0 will
  // disable the route's timeout.
  //
  // .. note::
  //
  //   This timeout includes all retries. See also
  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,
  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the
  //   :ref:`retry overview <arch_overview_http_routing_retry>`.
  google.protobuf.Duration timeout = 8;

  // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout,
  // although the connection manager wide :ref:`stream_idle_timeout
  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`
  // will still apply. A value of 0 will completely disable the route's idle timeout, even if a
  // connection manager stream idle timeout is configured.
  //
  // The idle timeout is distinct to :ref:`timeout
  // <envoy_api_field_route.RouteAction.timeout>`, which provides an upper bound
  // on the upstream response time; :ref:`idle_timeout
  // <envoy_api_field_route.RouteAction.idle_timeout>` instead bounds the amount
  // of time the request's stream may be idle.
  //
  // After header decoding, the idle timeout will apply on downstream and
  // upstream request events. Each time an encode/decode event for headers or
  // data is processed for the stream, the timer will be reset. If the timeout
  // fires, the stream is terminated with a 408 Request Timeout error code if no
  // upstream response header has been received, otherwise a stream reset
  // occurs.
  google.protobuf.Duration idle_timeout = 24;

  // Indicates that the route has a retry policy. Note that if this is set,
  // it'll take precedence over the virtual host level retry policy entirely
  // (e.g.: policies are not merged, most internal one becomes the enforced policy).
  RetryPolicy retry_policy = 9;

  // [#not-implemented-hide:]
  // Specifies the configuration for retry policy extension. Note that if this is set, it'll take
  // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged,
  // most internal one becomes the enforced policy). :ref:`Retry policy
  // <envoy_api_field_route.RouteAction.retry_policy>` should not be set if this field is used.
  google.protobuf.Any retry_policy_typed_config = 33;

  // Indicates that the route has a request mirroring policy.
  //
  // .. attention::
  //   This field has been deprecated in favor of `request_mirror_policies` which supports one or
  //   more mirroring policies.
  RequestMirrorPolicy request_mirror_policy = 10 [deprecated = true];

  // Indicates that the route has request mirroring policies.
repeated RequestMirrorPolicy request_mirror_policies = 30;

  // Optionally specifies the :ref:`routing priority <arch_overview_http_routing_priority>`.
  core.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}];

  // Specifies a set of rate limit configurations that could be applied to the
  // route.
  repeated RateLimit rate_limits = 13;

  // Specifies if the rate limit filter should include the virtual host rate
  // limits. By default, if the route configured rate limits, the virtual host
  // :ref:`rate_limits <envoy_api_field_route.VirtualHost.rate_limits>` are not applied to the
  // request.
  google.protobuf.BoolValue include_vh_rate_limits = 14;

  // Specifies a list of hash policies to use for ring hash load balancing. Each
  // hash policy is evaluated individually and the combined result is used to
  // route the request. The method of combination is deterministic such that
  // identical lists of hash policies will produce the same hash. Since a hash
  // policy examines specific parts of a request, it can fail to produce a hash
  // (i.e. if the hashed header is not present). If (and only if) all configured
  // hash policies fail to generate a hash, no hash will be produced for
  // the route. In this case, the behavior is the same as if no hash policies
  // were specified (i.e. the ring hash load balancer will choose a random
  // backend). If a hash policy has the "terminal" attribute set to true, and
  // there is already a hash generated, the hash is returned immediately,
  // ignoring the rest of the hash policy list.
  repeated HashPolicy hash_policy = 15;

  // Indicates that the route has a CORS policy.
  CorsPolicy cors = 17;

  // If present, and the request is a gRPC request, use the
  // `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_,
  // or its default value (infinity) instead of
  // :ref:`timeout <envoy_api_field_route.RouteAction.timeout>`, but limit the applied timeout
  // to the maximum value specified here. If configured as 0, the maximum allowed timeout for
  // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used
  // and gRPC requests time out like any other requests using
  // :ref:`timeout <envoy_api_field_route.RouteAction.timeout>` or its default.
  // This can be used to prevent unexpected upstream request timeouts due to potentially long
  // time gaps between gRPC request and response in gRPC streaming mode.
  //
  // .. note::
  //
  //   If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes
  //   precedence over `grpc-timeout header <https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md>`_, when
  //   both are present. See also
  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,
  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the
  //   :ref:`retry overview <arch_overview_http_routing_retry>`.
  google.protobuf.Duration max_grpc_timeout = 23;

  // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting
  // the provided duration from the header. This is useful in allowing Envoy to set its global
  // timeout to be less than that of the deadline imposed by the calling client, which makes it more
  // likely that Envoy will handle the timeout instead of having the call canceled by the client.
  // The offset will only be applied if the provided grpc_timeout is greater than the offset. This
  // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning
  // infinity).
google.protobuf.Duration grpc_timeout_offset = 28;

  repeated UpgradeConfig upgrade_configs = 25;

  InternalRedirectAction internal_redirect_action = 26;

  // An internal redirect is handled, iff the number of previous internal redirects that a
  // downstream request has encountered is lower than this value, and
  // :ref:`internal_redirect_action <envoy_api_field_route.RouteAction.internal_redirect_action>`
  // is set to :ref:`HANDLE_INTERNAL_REDIRECT
  // <envoy_api_enum_value_route.RouteAction.InternalRedirectAction.HANDLE_INTERNAL_REDIRECT>`
  // In the case where a downstream request is bounced among multiple routes by internal redirect,
  // the first route that hits this threshold, or has
  // :ref:`internal_redirect_action <envoy_api_field_route.RouteAction.internal_redirect_action>`
  // set to
  // :ref:`PASS_THROUGH_INTERNAL_REDIRECT
  // <envoy_api_enum_value_route.RouteAction.InternalRedirectAction.PASS_THROUGH_INTERNAL_REDIRECT>`
  // will pass the redirect back to downstream.
  //
  // If not specified, at most one redirect will be followed.
  google.protobuf.UInt32Value max_internal_redirects = 31;

  // Indicates that the route has a hedge policy. Note that if this is set,
  // it'll take precedence over the virtual host level hedge policy entirely
  // (e.g.: policies are not merged, most internal one becomes the enforced policy).
  HedgePolicy hedge_policy = 27;
}

// HTTP retry :ref:`architecture overview <arch_overview_http_routing_retry>`.
// [#next-free-field: 11]
message RetryPolicy {
  message RetryPriority {
    string name = 1 [(validate.rules).string = {min_bytes: 1}];

    oneof config_type {
      google.protobuf.Struct config = 2 [deprecated = true];

      google.protobuf.Any typed_config = 3;
    }
  }

  message RetryHostPredicate {
    string name = 1 [(validate.rules).string = {min_bytes: 1}];

    oneof config_type {
      google.protobuf.Struct config = 2 [deprecated = true];

      google.protobuf.Any typed_config = 3;
    }
  }

  message RetryBackOff {
    // Specifies the base interval between retries. This parameter is required and must be greater
    // than zero. Values less than 1 ms are rounded up to 1 ms.
    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's
    // back-off algorithm.
    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {
      required: true
      gt {}
    }];

    // Specifies the maximum interval between retries. This parameter is optional, but must be
    // greater than or equal to the `base_interval` if set. The default is 10 times the
    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion
    // of Envoy's back-off algorithm.
    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];
  }

  // Specifies the conditions under which retry takes place. These are the same
  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and
  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.
  string retry_on = 1;

  // Specifies the allowed number of retries. This parameter is optional and
  // defaults to 1. These are the same conditions documented for
  // :ref:`config_http_filters_router_x-envoy-max-retries`.
  google.protobuf.UInt32Value num_retries = 2;

  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The
  // same conditions documented for
  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.
  //
  // .. note::
  //
  //   If left unspecified, Envoy will use the global
  //   :ref:`route timeout <envoy_api_field_route.RouteAction.timeout>` for the request.
  //   Consequently, when using a :ref:`5xx <config_http_filters_router_x-envoy-retry-on>` based
  //   retry policy, a request that times out will not be retried as the total timeout budget
  //   would have been exhausted.
  google.protobuf.Duration per_try_timeout = 3;

  // Specifies an implementation of a RetryPriority which is used to determine the
  // distribution of load across priorities used for retries. Refer to
  // :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more details.
  RetryPriority retry_priority = 4;

  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host
  // for retries. If any of the predicates reject the host, host selection will be reattempted.
  // Refer to :ref:`retry plugin configuration <arch_overview_http_retry_plugins>` for more
  // details.
repeated RetryHostPredicate retry_host_predicate = 5; // The maximum number of times host selection will be reattempted before giving up, at which // point the host that was last selected will be routed to. If unspecified, this will default to // retrying once. int64 host_selection_retry_max_attempts = 6; // HTTP status codes that should trigger a retry in addition to those specified by retry_on. repeated uint32 retriable_status_codes = 7; // Specifies parameters that control retry back off. This parameter is optional, in which case the // default base interval is 25 milliseconds or, if set, the current value of the // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries` // describes Envoy's back-off algorithm. RetryBackOff retry_back_off = 8; // HTTP response headers that trigger a retry if present in the response. A retry will be // triggered if any of the header matches match the upstream response headers. // The field is only consulted if 'retriable-headers' retry policy is active. repeated HeaderMatcher retriable_headers = 9; // HTTP headers which must be present in the request for retries to be attempted. repeated HeaderMatcher retriable_request_headers = 10; } // HTTP request hedging :ref:`architecture overview `. message HedgePolicy { // Specifies the number of initial requests that should be sent upstream. // Must be at least 1. // Defaults to 1. // [#not-implemented-hide:] google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}]; // Specifies a probability that an additional upstream request should be sent // on top of what is specified by initial_requests. // Defaults to 0. // [#not-implemented-hide:] type.FractionalPercent additional_request_chance = 2; // Indicates that a hedged request should be sent when the per-try timeout // is hit. 
This will only occur if the retry policy also indicates that a // timed out request should be retried. // Once a timed out request is retried due to per try timeout, the router // filter will ensure that it is not retried again even if the returned // response headers would otherwise be retried according the specified // :ref:`RetryPolicy `. // Defaults to false. bool hedge_on_per_try_timeout = 3; } // [#next-free-field: 9] message RedirectAction { enum RedirectResponseCode { // Moved Permanently HTTP Status Code - 301. MOVED_PERMANENTLY = 0; // Found HTTP Status Code - 302. FOUND = 1; // See Other HTTP Status Code - 303. SEE_OTHER = 2; // Temporary Redirect HTTP Status Code - 307. TEMPORARY_REDIRECT = 3; // Permanent Redirect HTTP Status Code - 308. PERMANENT_REDIRECT = 4; } // When the scheme redirection take place, the following rules apply: // 1. If the source URI scheme is `http` and the port is explicitly // set to `:80`, the port will be removed after the redirection // 2. If the source URI scheme is `https` and the port is explicitly // set to `:443`, the port will be removed after the redirection oneof scheme_rewrite_specifier { // The scheme portion of the URL will be swapped with "https". bool https_redirect = 4; // The scheme portion of the URL will be swapped with this value. string scheme_redirect = 7; } // The host portion of the URL will be swapped with this value. string host_redirect = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The port value of the URL will be swapped with this value. uint32 port_redirect = 8; oneof path_rewrite_specifier { // The path portion of the URL will be swapped with this value. // Please note that query string in path_redirect will override the // request's query string and will not be stripped. 
// // For example, let's say we have the following routes: // // - match: { path: "/old-path-1" } // redirect: { path_redirect: "/new-path-1" } // - match: { path: "/old-path-2" } // redirect: { path_redirect: "/new-path-2", strip_query: "true" } // - match: { path: "/old-path-3" } // redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" } // // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1" // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2" // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1" string path_redirect = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Indicates that during redirection, the matched prefix (or path) // should be swapped with this value. This option allows redirect URLs to be dynamically created // based on the request. // // .. attention:: // // Pay attention to the use of trailing slashes as mentioned in // :ref:`RouteAction's prefix_rewrite `. string prefix_rewrite = 5 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // The HTTP status code to use in the redirect response. The default response // code is MOVED_PERMANENTLY (301). RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}]; // Indicates that during redirection, the query portion of the URL will // be removed. Default value is false. bool strip_query = 6; } message DirectResponseAction { // Specifies the HTTP response status to be returned. uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}]; // Specifies the content of the response body. If this setting is omitted, // no body is included in the generated response. // // .. note:: // // Headers can be specified using *response_headers_to_add* in the enclosing // :ref:`envoy_api_msg_route.Route`, :ref:`envoy_api_msg_RouteConfiguration` or // :ref:`envoy_api_msg_route.VirtualHost`. 
core.DataSource body = 2; } message Decorator { // The operation name associated with the request matched to this route. If tracing is // enabled, this information will be used as the span name reported for this request. // // .. note:: // // For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden // by the :ref:`x-envoy-decorator-operation // ` header. string operation = 1 [(validate.rules).string = {min_bytes: 1}]; // Whether the decorated details should be propagated to the other party. The default is true. google.protobuf.BoolValue propagate = 2; } message Tracing { // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. This field is a direct analog for the runtime variable // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. // Default: 100% type.FractionalPercent client_sampling = 1; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% type.FractionalPercent random_sampling = 2; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random // sampling). This field functions as an upper limit on the total configured sampling rate. For // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% // of client requests with the appropriate headers to be force traced. This field is a direct // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. 
// Default: 100% type.FractionalPercent overall_sampling = 3; // A list of custom tags with unique tag name to create tags for the active span. // It will take effect after merging with the :ref:`corresponding configuration // ` // configured in the HTTP connection manager. If two tags with the same name are configured // each in the HTTP connection manager and the route level, the one configured here takes // priority. repeated type.tracing.v2.CustomTag custom_tags = 4; } // A virtual cluster is a way of specifying a regex matching rule against // certain important endpoints such that statistics are generated explicitly for // the matched requests. The reason this is useful is that when doing // prefix/path matching Envoy does not always know what the application // considers to be an endpoint. Thus, it’s impossible for Envoy to generically // emit per endpoint statistics. However, often systems have highly critical // endpoints that they wish to get “perfect” statistics on. Virtual cluster // statistics are perfect in the sense that they are emitted on the downstream // side such that they include network level failures. // // Documentation for :ref:`virtual cluster statistics `. // // .. note:: // // Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for // every application endpoint. This is both not easily maintainable and as well the matching and // statistics output are not free. message VirtualCluster { // Specifies a regex pattern to use for matching requests. The entire path of the request // must match the regex. The regex grammar used is defined `here // `_. // // Examples: // // * The regex ``/rides/\d+`` matches the path */rides/0* // * The regex ``/rides/\d+`` matches the path */rides/123* // * The regex ``/rides/\d+`` does not match the path */rides/123/456* // // .. attention:: // This field has been deprecated in favor of `headers` as it is not safe for use with // untrusted input in all cases. 
string pattern = 1 [ deprecated = true, (validate.rules).string = {max_bytes: 1024}, (envoy.annotations.disallowed_by_default) = true ]; // Specifies a list of header matchers to use for matching requests. Each specified header must // match. The pseudo-headers `:path` and `:method` can be used to match the request path and // method, respectively. repeated HeaderMatcher headers = 4; // Specifies the name of the virtual cluster. The virtual cluster name as well // as the virtual host name are used when emitting statistics. The statistics are emitted by the // router filter and are documented :ref:`here `. string name = 2 [(validate.rules).string = {min_bytes: 1}]; // Optionally specifies the HTTP method to match on. For example GET, PUT, // etc. // // .. attention:: // This field has been deprecated in favor of `headers`. core.RequestMethod method = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; } // Global rate limiting :ref:`architecture overview `. message RateLimit { // [#next-free-field: 7] message Action { // The following descriptor entry is appended to the descriptor: // // .. code-block:: cpp // // ("source_cluster", "") // // is derived from the :option:`--service-cluster` option. message SourceCluster { } // The following descriptor entry is appended to the descriptor: // // .. code-block:: cpp // // ("destination_cluster", "") // // Once a request matches against a route table rule, a routed cluster is determined by one of // the following :ref:`route table configuration ` // settings: // // * :ref:`cluster ` indicates the upstream cluster // to route to. // * :ref:`weighted_clusters ` // chooses a cluster randomly from a set of clusters with attributed weight. // * :ref:`cluster_header ` indicates which // header in the request contains the target cluster. message DestinationCluster { } // The following descriptor entry is appended when a header contains a key that matches the // *header_name*: // // .. 
code-block:: cpp // // ("", "") message RequestHeaders { // The header name to be queried from the request headers. The header’s // value is used to populate the value of the descriptor entry for the // descriptor_key. string header_name = 1 [ (validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false} ]; // The key to use in the descriptor entry. string descriptor_key = 2 [(validate.rules).string = {min_bytes: 1}]; } // The following descriptor entry is appended to the descriptor and is populated using the // trusted address from :ref:`x-forwarded-for `: // // .. code-block:: cpp // // ("remote_address", "") message RemoteAddress { } // The following descriptor entry is appended to the descriptor: // // .. code-block:: cpp // // ("generic_key", "") message GenericKey { // The value to use in the descriptor entry. string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; } // The following descriptor entry is appended to the descriptor: // // .. code-block:: cpp // // ("header_match", "") message HeaderValueMatch { // The value to use in the descriptor entry. string descriptor_value = 1 [(validate.rules).string = {min_bytes: 1}]; // If set to true, the action will append a descriptor entry when the // request matches the headers. If set to false, the action will append a // descriptor entry when the request does not match the headers. The // default value is true. google.protobuf.BoolValue expect_match = 2; // Specifies a set of headers that the rate limit action should match // on. The action will check the request’s headers against all the // specified headers in the config. A match will happen if all the // headers in the config are present in the request with the same values // (or based on presence if the value field is not in the config). repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}]; } oneof action_specifier { option (validate.required) = true; // Rate limit on source cluster. 
SourceCluster source_cluster = 1; // Rate limit on destination cluster. DestinationCluster destination_cluster = 2; // Rate limit on request headers. RequestHeaders request_headers = 3; // Rate limit on remote address. RemoteAddress remote_address = 4; // Rate limit on a generic key. GenericKey generic_key = 5; // Rate limit on the existence of request headers. HeaderValueMatch header_value_match = 6; } } // Refers to the stage set in the filter. The rate limit configuration only // applies to filters with the same stage number. The default stage number is // 0. // // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}]; // The key to be set in runtime to disable this rate limit configuration. string disable_key = 2; // A list of actions that are to be applied for this rate limit configuration. // Order matters as the actions are processed sequentially and the descriptor // is composed by appending descriptor entries in that sequence. If an action // cannot append a descriptor entry, no descriptor is generated for the // configuration. See :ref:`composing actions // ` for additional documentation. repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}]; } // .. attention:: // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host* // header. Thus, if attempting to match on *Host*, match on *:authority* instead. // // .. attention:: // // To route on HTTP method, use the special HTTP/2 *:method* header. This works for both // HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g., // // .. code-block:: json // // { // "name": ":method", // "exact_match": "POST" // } // // .. attention:: // In the absence of any header match specifier, match will default to :ref:`present_match // `. i.e, a request that has the :ref:`name // ` header will match, regardless of the header's // value. 
// // [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.] // [#next-free-field: 12] message HeaderMatcher { reserved 2, 3; // Specifies the name of the header in the request. string name = 1 [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Specifies how the header match will be performed to route the request. oneof header_match_specifier { // If specified, header match will be performed based on the value of the header. string exact_match = 4; // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the // request header value matches the regex. The regex grammar used in the value field is defined // `here `_. // // Examples: // // * The regex ``\d{3}`` matches the value *123* // * The regex ``\d{3}`` does not match the value *1234* // * The regex ``\d{3}`` does not match the value *123.456* // // .. attention:: // This field has been deprecated in favor of `safe_regex_match` as it is not safe for use // with untrusted input in all cases. string regex_match = 5 [ deprecated = true, (validate.rules).string = {max_bytes: 1024}, (envoy.annotations.disallowed_by_default) = true ]; // If specified, this regex string is a regular expression rule which implies the entire request // header value must match the regex. The rule will not match if only a subsequence of the // request header value matches the regex. type.matcher.RegexMatcher safe_regex_match = 11; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. // The entire request header value must represent an integer in base 10 notation: consisting of // an optional plus or minus sign followed by a sequence of digits. The rule will not match if // the header value does not represent an integer. 
Match will fail for empty values, floating // point numbers or if only a subsequence of the header value is an integer. // // Examples: // // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9, // "-1somestring" type.Int64Range range_match = 6; // If specified, header match will be performed based on whether the header is in the // request. bool present_match = 7; // If specified, header match will be performed based on the prefix of the header value. // Note: empty prefix is not allowed, please use present_match instead. // // Examples: // // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*. string prefix_match = 9 [(validate.rules).string = {min_bytes: 1}]; // If specified, header match will be performed based on the suffix of the header value. // Note: empty suffix is not allowed, please use present_match instead. // // Examples: // // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*. string suffix_match = 10 [(validate.rules).string = {min_bytes: 1}]; } // If specified, the match result will be inverted before checking. Defaults to false. // // Examples: // // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted. // * The range [-10,0) will match the value -1, so it will not match when inverted. bool invert_match = 8; } // Query parameter matching treats the query string of a request's :path header // as an ampersand-separated list of keys and/or key=value elements. // [#next-free-field: 7] message QueryParameterMatcher { // Specifies the name of a key that must be present in the requested // *path*'s query string. string name = 1 [(validate.rules).string = {min_bytes: 1 max_bytes: 1024}]; // Specifies the value of the key. If the value is absent, a request // that contains the key in its query string will match, whether the // key appears with a value (e.g., "?debug=true") or not (e.g., "?debug") // // ..attention:: // This field is deprecated. 
Use an `exact` match inside the `string_match` field. string value = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // Specifies whether the query parameter value is a regular expression. // Defaults to false. The entire query parameter value (i.e., the part to // the right of the equals sign in "key=value") must match the regex. // E.g., the regex ``\d+$`` will match *123* but not *a123* or *123a*. // // ..attention:: // This field is deprecated. Use a `safe_regex` match inside the `string_match` field. google.protobuf.BoolValue regex = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; oneof query_parameter_match_specifier { // Specifies whether a query parameter value should match against a string. type.matcher.StringMatcher string_match = 5 [(validate.rules).message = {required: true}]; // Specifies whether a query parameter should be present. bool present_match = 6; } } ================================================ FILE: api/envoy/api/v2/route.proto ================================================ syntax = "proto3"; package envoy.api.v2; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; import "envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP route configuration] // * Routing :ref:`architecture overview ` // * HTTP :ref:`router filter ` // [#next-free-field: 11] message RouteConfiguration { // The name of the route configuration. 
For example, it might match // :ref:`route_config_name // ` in // :ref:`envoy_api_msg_config.filter.network.http_connection_manager.v2.Rds`. string name = 1; // An array of virtual hosts that make up the route table. repeated route.VirtualHost virtual_hosts = 2; // An array of virtual hosts will be dynamically loaded via the VHDS API. // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for // on-demand discovery of virtual hosts. The contents of these two fields will be merged to // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration // taking precedence. Vhds vhds = 9; // Optionally specifies a list of HTTP headers that the connection manager // will consider to be internal only. If they are found on external requests they will be cleaned // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more // information. repeated string internal_only_headers = 3 [ (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} ]; // Specifies a list of HTTP headers that should be added to each response that // the connection manager encodes. Headers specified at this level are applied // after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption response_headers_to_add = 4 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // that the connection manager encodes. 
repeated string response_headers_to_remove = 5 [ (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} ]; // Specifies a list of HTTP headers that should be added to each request // routed by the HTTP connection manager. Headers specified at this level are // applied after headers from any enclosed :ref:`envoy_api_msg_route.VirtualHost` or // :ref:`envoy_api_msg_route.RouteAction`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.HeaderValueOption request_headers_to_add = 6 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // routed by the HTTP connection manager. repeated string request_headers_to_remove = 8 [ (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} ]; // By default, headers that should be added/removed are evaluated from most to least specific: // // * route level // * virtual host level // * connection manager level // // To allow setting overrides at the route or virtual host level, this order can be reversed // by setting this option to true. Defaults to false. // // [#next-major-version: In the v3 API, this will default to true.] bool most_specific_header_mutations_wins = 10; // An optional boolean that specifies whether the clusters that the route // table refers to will be validated by the cluster manager. If set to true // and a route refers to a non-existent cluster, the route table will not // load. If set to false and a route refers to a non-existent cluster, the // route table will load and the router filter will return a 404 if the route // is selected at runtime. This setting defaults to true if the route table // is statically defined via the :ref:`route_config // ` // option. This setting defaults to false if the route table is loaded dynamically via the // :ref:`rds // ` // option. 
Users may wish to override the default behavior in certain cases (for example when // using CDS with a static route table). google.protobuf.BoolValue validate_clusters = 7; } message Vhds { // Configuration source specifier for VHDS. core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/api/v2/scoped_route.proto ================================================ syntax = "proto3"; package envoy.api.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "ScopedRouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.route.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP scoped routing configuration] // * Routing :ref:`architecture overview ` // Specifies a routing scope, which associates a // :ref:`Key` to a // :ref:`envoy_api_msg_RouteConfiguration` (identified by its resource name). // // The HTTP connection manager builds up a table consisting of these Key to // RouteConfiguration mappings, and looks up the RouteConfiguration to use per // request according to the algorithm specified in the // :ref:`scope_key_builder` // assigned to the HttpConnectionManager. // // For example, with the following configurations (in YAML): // // HttpConnectionManager config: // // .. code:: // // ... // scoped_routes: // name: foo-scoped-routes // scope_key_builder: // fragments: // - header_value_extractor: // name: X-Route-Selector // element_separator: , // element: // separator: = // key: vip // // ScopedRouteConfiguration resources (specified statically via // :ref:`scoped_route_configurations_list` // or obtained dynamically via SRDS): // // .. 
code:: // // (1) // name: route-scope1 // route_configuration_name: route-config1 // key: // fragments: // - string_key: 172.10.10.20 // // (2) // name: route-scope2 // route_configuration_name: route-config2 // key: // fragments: // - string_key: 172.20.20.30 // // A request from a client such as: // // .. code:: // // GET / HTTP/1.1 // Host: foo.com // X-Route-Selector: vip=172.10.10.20 // // would result in the routing table defined by the `route-config1` // RouteConfiguration being assigned to the HTTP request/stream. // message ScopedRouteConfiguration { // Specifies a key which is matched against the output of the // :ref:`scope_key_builder` // specified in the HttpConnectionManager. The matching is done per HTTP // request and is dependent on the order of the fragments contained in the // Key. message Key { message Fragment { oneof type { option (validate.required) = true; // A string to match against. string string_key = 1; } } // The ordered set of fragments to match against. The order must match the // fragments in the corresponding // :ref:`scope_key_builder`. repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } // The name assigned to the routing scope. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The resource name to use for a :ref:`envoy_api_msg_DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_RouteConfiguration` associated // with this scope. string route_configuration_name = 2 [(validate.rules).string = {min_bytes: 1}]; // The key to match against. 
Key key = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/api/v2/srds.proto ================================================ syntax = "proto3"; package envoy.api.v2; import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import public "envoy/api/v2/scoped_route.proto"; option java_package = "io.envoyproxy.envoy.api.v2"; option java_outer_classname = "SrdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.route.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: SRDS] // * Routing :ref:`architecture overview ` // The Scoped Routes Discovery Service (SRDS) API distributes // :ref:`ScopedRouteConfiguration` // resources. Each ScopedRouteConfiguration resource represents a "routing // scope" containing a mapping that allows the HTTP connection manager to // dynamically assign a routing table (specified via a // :ref:`RouteConfiguration` message) to each // HTTP request. service ScopedRoutesDiscoveryService { option (envoy.annotations.resource).type = "envoy.api.v2.ScopedRouteConfiguration"; rpc StreamScopedRoutes(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } rpc DeltaScopedRoutes(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } rpc FetchScopedRoutes(DiscoveryRequest) returns (DiscoveryResponse) { option (google.api.http).post = "/v2/discovery:scoped-routes"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
message SrdsDummy { } ================================================ FILE: api/envoy/config/README.md ================================================ Protocol buffer definitions for Envoy's bootstrap, filter, and service configuration. Visibility should be constrained to none or `//envoy/config/bootstrap/v2` by default. ================================================ FILE: api/envoy/config/accesslog/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/accesslog/v2/als.proto ================================================ syntax = "proto3"; package envoy.config.accesslog.v2; import "envoy/api/v2/core/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.grpc.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC Access Log Service (ALS)] // Configuration for the built-in *envoy.access_loggers.http_grpc* // :ref:`AccessLog `. This configuration will // populate :ref:`StreamAccessLogsMessage.http_logs // `. // [#extension: envoy.access_loggers.http_grpc] message HttpGrpcAccessLogConfig { CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers // `. 
repeated string additional_request_headers_to_log = 2; // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers // `. repeated string additional_response_headers_to_log = 3; // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers // `. repeated string additional_response_trailers_to_log = 4; } // Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will // populate *StreamAccessLogsMessage.tcp_logs*. // [#extension: envoy.access_loggers.tcp_grpc] message TcpGrpcAccessLogConfig { CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}]; } // Common configuration for gRPC access logs. // [#next-free-field: 6] message CommonGrpcAccessLogConfig { // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier // `. This allows the // access log server to differentiate between different access logs coming from the same Envoy. string log_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The gRPC service for the access log service. api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to // 1 second. google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}]; // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it // to zero effectively disables the batching. Defaults to 16384. google.protobuf.UInt32Value buffer_size_bytes = 4; // Additional filter state objects to log in :ref:`filter_state_objects // `. // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object. 
repeated string filter_state_objects_to_log = 5; } ================================================ FILE: api/envoy/config/accesslog/v2/file.proto ================================================ syntax = "proto3"; package envoy.config.accesslog.v2; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v2"; option java_outer_classname = "FileProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.access_loggers.file.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: File access log] // [#extension: envoy.access_loggers.file] // Custom configuration for an :ref:`AccessLog ` // that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file* // AccessLog. message FileAccessLog { // A path to a local file to which to write the access log entries. string path = 1 [(validate.rules).string = {min_bytes: 1}]; oneof access_log_format { // Access log :ref:`format string`. // Envoy supports :ref:`custom access log formats ` as well as a // :ref:`default format `. string format = 2; // Access log :ref:`format dictionary`. All values // are rendered as strings. google.protobuf.Struct json_format = 3; // Access log :ref:`format dictionary`. Values are // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the // documentation for a specific command operator for details. google.protobuf.Struct typed_json_format = 4; } } ================================================ FILE: api/envoy/config/accesslog/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/accesslog/v3/accesslog.proto ================================================ syntax = "proto3"; package envoy.config.accesslog.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v3"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common access log types] message AccessLog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.AccessLog"; reserved 3; reserved "config"; // The name of the access log implementation to instantiate. The name must // match a statically registered access log. Current built-in loggers include: // // #. "envoy.access_loggers.file" // #. "envoy.access_loggers.http_grpc" // #. "envoy.access_loggers.tcp_grpc" string name = 1; // Filter which is used to determine if the access log needs to be written. AccessLogFilter filter = 2; // Custom configuration that depends on the access log being instantiated. // Built-in configurations include: // // #. "envoy.access_loggers.file": :ref:`FileAccessLog // ` // #. 
"envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig // ` // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig // ` oneof config_type { google.protobuf.Any typed_config = 4; } } // [#next-free-field: 13] message AccessLogFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.AccessLogFilter"; oneof filter_specifier { option (validate.required) = true; // Status code filter. StatusCodeFilter status_code_filter = 1; // Duration filter. DurationFilter duration_filter = 2; // Not health check filter. NotHealthCheckFilter not_health_check_filter = 3; // Traceable filter. TraceableFilter traceable_filter = 4; // Runtime filter. RuntimeFilter runtime_filter = 5; // And filter. AndFilter and_filter = 6; // Or filter. OrFilter or_filter = 7; // Header filter. HeaderFilter header_filter = 8; // Response flag filter. ResponseFlagFilter response_flag_filter = 9; // gRPC status filter. GrpcStatusFilter grpc_status_filter = 10; // Extension filter. ExtensionFilter extension_filter = 11; // Metadata Filter MetadataFilter metadata_filter = 12; } } // Filter on an integer comparison. message ComparisonFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.ComparisonFilter"; enum Op { // = EQ = 0; // >= GE = 1; // <= LE = 2; } // Comparison operator. Op op = 1 [(validate.rules).enum = {defined_only: true}]; // Value to compare against. core.v3.RuntimeUInt32 value = 2; } // Filters on HTTP response/status code. message StatusCodeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.StatusCodeFilter"; // Comparison. ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters on total request duration in milliseconds. message DurationFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.DurationFilter"; // Comparison. 
ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters for requests that are not health check requests. A health check // request is marked by the health check filter. message NotHealthCheckFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.NotHealthCheckFilter"; } // Filters for requests that are traceable. See the tracing overview for more // information on how a request becomes traceable. message TraceableFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.TraceableFilter"; } // Filters for random sampling of requests. message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.RuntimeFilter"; // Runtime key to get an optional overridden numerator for use in the // *percent_sampled* field. If found in runtime, this value will replace the // default numerator. string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; // The default sampling percentage. If not specified, defaults to 0% with // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header // :ref:`x-request-id` being // present. If :ref:`x-request-id` // is present, the filter will consistently sample across multiple hosts based // on the runtime key value and the value extracted from // :ref:`x-request-id`. If it is // missing, or *use_independent_randomness* is set to true, the filter will // randomly sample based on the runtime key value alone. // *use_independent_randomness* can be used for logging kill switches within // complex nested :ref:`AndFilter // ` and :ref:`OrFilter // ` blocks that are easier to // reason about from a probability perspective (i.e., setting to true will // cause the filter to behave like an independent random variable when // composed within logical operator filters). 
bool use_independent_randomness = 3; } // Performs a logical “and” operation on the result of each filter in filters. // Filters are evaluated sequentially and if one of them returns false, the // filter returns false immediately. message AndFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.AndFilter"; repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; } // Performs a logical “or” operation on the result of each individual filter. // Filters are evaluated sequentially and if one of them returns true, the // filter returns true immediately. message OrFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.OrFilter"; repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; } // Filters requests based on the presence or value of a request header. message HeaderFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.HeaderFilter"; // Only requests with a header which matches the specified HeaderMatcher will // pass the filter check. route.v3.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. // A list of the response flags can be found // in the access log formatter // :ref:`documentation`. message ResponseFlagFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.ResponseFlagFilter"; // Only responses with the any of the flags listed in this field will be // logged. This field is optional. If it is not specified, then any response // flag will pass the filter check. 
repeated string flags = 1 [(validate.rules).repeated = { items { string { in: "LH" in: "UH" in: "UT" in: "LR" in: "UR" in: "UF" in: "UC" in: "UO" in: "NR" in: "DI" in: "FI" in: "RL" in: "UAEX" in: "RLSE" in: "DC" in: "URX" in: "SI" in: "IH" in: "DPE" in: "UMSDR" in: "RFCF" in: "NFCF" in: "DT" } } }]; } // Filters gRPC requests based on their response status. If a gRPC status is not // provided, the filter will infer the status from the HTTP status code. message GrpcStatusFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.GrpcStatusFilter"; enum Status { OK = 0; CANCELED = 1; UNKNOWN = 2; INVALID_ARGUMENT = 3; DEADLINE_EXCEEDED = 4; NOT_FOUND = 5; ALREADY_EXISTS = 6; PERMISSION_DENIED = 7; RESOURCE_EXHAUSTED = 8; FAILED_PRECONDITION = 9; ABORTED = 10; OUT_OF_RANGE = 11; UNIMPLEMENTED = 12; INTERNAL = 13; UNAVAILABLE = 14; DATA_LOSS = 15; UNAUTHENTICATED = 16; } // Logs only responses that have any one of the gRPC statuses in this field. repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; // If included and set to true, the filter will instead block all responses // with a gRPC status or inferred gRPC status enumerated in statuses, and // allow all other responses. bool exclude = 2; } // Filters based on matching dynamic metadata. // If the matcher path and key correspond to an existing key in dynamic // metadata, the request is logged only if the matcher value is equal to the // metadata value. If the matcher path and key *do not* correspond to an // existing key in dynamic metadata, the request is logged only if // match_if_key_not_found is "true" or unset. message MetadataFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.MetadataFilter"; // Matcher to check metadata for specified value. 
For example, to match on the // access_log_hint metadata, set the filter to "envoy.common" and the path to // "access_log_hint", and the value to "true". type.matcher.v3.MetadataMatcher matcher = 1; // Default result if the key does not exist in dynamic metadata: if unset or // true, then log; if false, then don't log. google.protobuf.BoolValue match_if_key_not_found = 2; } // Extension filter is statically registered at runtime. message ExtensionFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.accesslog.v2.ExtensionFilter"; reserved 2; reserved "config"; // The name of the filter implementation to instantiate. The name must // match a statically registered filter. string name = 1; // Custom configuration that depends on the filter being instantiated. oneof config_type { google.protobuf.Any typed_config = 3; } } ================================================ FILE: api/envoy/config/accesslog/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/accesslog/v4alpha/accesslog.proto ================================================ syntax = "proto3"; package envoy.config.accesslog.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.accesslog.v4alpha"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Common access log types] message AccessLog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.AccessLog"; reserved 3; reserved "config"; // The name of the access log implementation to instantiate. The name must // match a statically registered access log. Current built-in loggers include: // // #. "envoy.access_loggers.file" // #. "envoy.access_loggers.http_grpc" // #. "envoy.access_loggers.tcp_grpc" string name = 1; // Filter which is used to determine if the access log needs to be written. AccessLogFilter filter = 2; // Custom configuration that depends on the access log being instantiated. // Built-in configurations include: // // #. 
"envoy.access_loggers.file": :ref:`FileAccessLog // ` // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig // ` // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig // ` oneof config_type { google.protobuf.Any typed_config = 4; } } // [#next-free-field: 13] message AccessLogFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.AccessLogFilter"; oneof filter_specifier { option (validate.required) = true; // Status code filter. StatusCodeFilter status_code_filter = 1; // Duration filter. DurationFilter duration_filter = 2; // Not health check filter. NotHealthCheckFilter not_health_check_filter = 3; // Traceable filter. TraceableFilter traceable_filter = 4; // Runtime filter. RuntimeFilter runtime_filter = 5; // And filter. AndFilter and_filter = 6; // Or filter. OrFilter or_filter = 7; // Header filter. HeaderFilter header_filter = 8; // Response flag filter. ResponseFlagFilter response_flag_filter = 9; // gRPC status filter. GrpcStatusFilter grpc_status_filter = 10; // Extension filter. ExtensionFilter extension_filter = 11; // Metadata Filter MetadataFilter metadata_filter = 12; } } // Filter on an integer comparison. message ComparisonFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.ComparisonFilter"; enum Op { // = EQ = 0; // >= GE = 1; // <= LE = 2; } // Comparison operator. Op op = 1 [(validate.rules).enum = {defined_only: true}]; // Value to compare against. core.v4alpha.RuntimeUInt32 value = 2; } // Filters on HTTP response/status code. message StatusCodeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.StatusCodeFilter"; // Comparison. ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters on total request duration in milliseconds. 
message DurationFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.DurationFilter"; // Comparison. ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}]; } // Filters for requests that are not health check requests. A health check // request is marked by the health check filter. message NotHealthCheckFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.NotHealthCheckFilter"; } // Filters for requests that are traceable. See the tracing overview for more // information on how a request becomes traceable. message TraceableFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.TraceableFilter"; } // Filters for random sampling of requests. message RuntimeFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.RuntimeFilter"; // Runtime key to get an optional overridden numerator for use in the // *percent_sampled* field. If found in runtime, this value will replace the // default numerator. string runtime_key = 1 [(validate.rules).string = {min_len: 1}]; // The default sampling percentage. If not specified, defaults to 0% with // denominator of 100. type.v3.FractionalPercent percent_sampled = 2; // By default, sampling pivots on the header // :ref:`x-request-id` being // present. If :ref:`x-request-id` // is present, the filter will consistently sample across multiple hosts based // on the runtime key value and the value extracted from // :ref:`x-request-id`. If it is // missing, or *use_independent_randomness* is set to true, the filter will // randomly sample based on the runtime key value alone. 
// *use_independent_randomness* can be used for logging kill switches within // complex nested :ref:`AndFilter // ` and :ref:`OrFilter // ` blocks that are easier to // reason about from a probability perspective (i.e., setting to true will // cause the filter to behave like an independent random variable when // composed within logical operator filters). bool use_independent_randomness = 3; } // Performs a logical “and” operation on the result of each filter in filters. // Filters are evaluated sequentially and if one of them returns false, the // filter returns false immediately. message AndFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.AndFilter"; repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}]; } // Performs a logical “or” operation on the result of each individual filter. // Filters are evaluated sequentially and if one of them returns true, the // filter returns true immediately. message OrFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.OrFilter"; repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}]; } // Filters requests based on the presence or value of a request header. message HeaderFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.HeaderFilter"; // Only requests with a header which matches the specified HeaderMatcher will // pass the filter check. route.v4alpha.HeaderMatcher header = 1 [(validate.rules).message = {required: true}]; } // Filters requests that received responses with an Envoy response flag set. // A list of the response flags can be found // in the access log formatter // :ref:`documentation`. message ResponseFlagFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.ResponseFlagFilter"; // Only responses with the any of the flags listed in this field will be // logged. This field is optional. 
If it is not specified, then any response // flag will pass the filter check. repeated string flags = 1 [(validate.rules).repeated = { items { string { in: "LH" in: "UH" in: "UT" in: "LR" in: "UR" in: "UF" in: "UC" in: "UO" in: "NR" in: "DI" in: "FI" in: "RL" in: "UAEX" in: "RLSE" in: "DC" in: "URX" in: "SI" in: "IH" in: "DPE" in: "UMSDR" in: "RFCF" in: "NFCF" in: "DT" } } }]; } // Filters gRPC requests based on their response status. If a gRPC status is not // provided, the filter will infer the status from the HTTP status code. message GrpcStatusFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.GrpcStatusFilter"; enum Status { OK = 0; CANCELED = 1; UNKNOWN = 2; INVALID_ARGUMENT = 3; DEADLINE_EXCEEDED = 4; NOT_FOUND = 5; ALREADY_EXISTS = 6; PERMISSION_DENIED = 7; RESOURCE_EXHAUSTED = 8; FAILED_PRECONDITION = 9; ABORTED = 10; OUT_OF_RANGE = 11; UNIMPLEMENTED = 12; INTERNAL = 13; UNAVAILABLE = 14; DATA_LOSS = 15; UNAUTHENTICATED = 16; } // Logs only responses that have any one of the gRPC statuses in this field. repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; // If included and set to true, the filter will instead block all responses // with a gRPC status or inferred gRPC status enumerated in statuses, and // allow all other responses. bool exclude = 2; } // Filters based on matching dynamic metadata. // If the matcher path and key correspond to an existing key in dynamic // metadata, the request is logged only if the matcher value is equal to the // metadata value. If the matcher path and key *do not* correspond to an // existing key in dynamic metadata, the request is logged only if // match_if_key_not_found is "true" or unset. message MetadataFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.MetadataFilter"; // Matcher to check metadata for specified value. 
For example, to match on the // access_log_hint metadata, set the filter to "envoy.common" and the path to // "access_log_hint", and the value to "true". type.matcher.v4alpha.MetadataMatcher matcher = 1; // Default result if the key does not exist in dynamic metadata: if unset or // true, then log; if false, then don't log. google.protobuf.BoolValue match_if_key_not_found = 2; } // Extension filter is statically registered at runtime. message ExtensionFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.accesslog.v3.ExtensionFilter"; reserved 2; reserved "config"; // The name of the filter implementation to instantiate. The name must // match a statically registered filter. string name = 1; // Custom configuration that depends on the filter being instantiated. oneof config_type { google.protobuf.Any typed_config = 3; } } ================================================ FILE: api/envoy/config/bootstrap/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2:pkg", "//envoy/api/v2/auth:pkg", "//envoy/api/v2/core:pkg", "//envoy/config/metrics/v2:pkg", "//envoy/config/overload/v2alpha:pkg", "//envoy/config/trace/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/bootstrap/v2/bootstrap.proto ================================================ syntax = "proto3"; package envoy.config.bootstrap.v2; import "envoy/api/v2/auth/secret.proto"; import "envoy/api/v2/cluster.proto"; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/config_source.proto"; import "envoy/api/v2/core/event_service_config.proto"; import "envoy/api/v2/core/socket_option.proto"; import "envoy/api/v2/listener.proto"; import "envoy/config/metrics/v2/stats.proto"; import "envoy/config/overload/v2alpha/overload.proto"; import "envoy/config/trace/v2/http_tracer.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.bootstrap.v2"; option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Bootstrap] // This proto is supplied via the :option:`-c` CLI flag and acts as the root // of the Envoy v2 configuration. See the :ref:`v2 configuration overview // ` for more detail. // Bootstrap :ref:`configuration overview `. // [#next-free-field: 21] message Bootstrap { message StaticResources { // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. 
repeated api.v2.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary // to have some initial cluster definitions available to allow Envoy to know // how to speak to the management server. These cluster definitions may not // use :ref:`EDS ` (i.e. they should be static // IP or DNS-based). repeated api.v2.Cluster clusters = 2; // These static secrets can be used by :ref:`SdsSecretConfig // ` repeated api.v2.auth.Secret secrets = 3; } message DynamicResources { reserved 4; // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. api.v2.core.ConfigSource lds_config = 1; // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. api.v2.core.ConfigSource cds_config = 2; // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC // `. Only // :ref:`ConfigSources ` that have // the :ref:`ads ` field set will be // streamed on the ADS channel. api.v2.core.ApiConfigSource ads_config = 3; } reserved 10; // Node identity to present to the management server and for instance // identification purposes (e.g. in generated headers). api.v2.core.Node node = 1; // Statically specified resources. StaticResources static_resources = 2; // xDS configuration sources. DynamicResources dynamic_resources = 3; // Configuration for the cluster manager which owns all upstream clusters // within the server. ClusterManager cluster_manager = 4; // Health discovery service config option. // (:ref:`core.ApiConfigSource `) api.v2.core.ApiConfigSource hds_config = 14; // Optional file system path to search for startup flag files. string flags_path = 5; // Optional set of stats sinks. repeated metrics.v2.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. metrics.v2.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. 
For // performance reasons Envoy latches counters and only flushes counters and // gauges at a periodic interval. If not specified the default is 5000ms (5 // seconds). // Duration must be at least 1ms and at most 5 min. google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { lt {seconds: 300} gte {nanos: 1000000} }]; // Optional watchdog configuration. Watchdog watchdog = 8; // Configuration for an external tracing provider. // // .. attention:: // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider // `. trace.v2.Tracing tracing = 9; // Configuration for the runtime configuration provider (deprecated). If not // specified, a “null” provider will be used which will result in all defaults // being used. Runtime runtime = 11 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // Configuration for the runtime configuration provider. If not // specified, a “null” provider will be used which will result in all defaults // being used. LayeredRuntime layered_runtime = 17; // Configuration for the local administration HTTP server. Admin admin = 12; // Optional overload manager configuration. overload.v2alpha.OverloadManager overload_manager = 15; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This // should normally be minimal overhead, but when using // :ref:`statsd `, it will send each observed value // over the wire individually because the statsd protocol doesn't have any way to represent a // histogram summary. Be aware that this can be a very large volume of data. bool enable_dispatcher_stats = 16; // Optional string which will be used in lieu of x-envoy in prefixing headers. // // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be // transformed into x-foo-retry-on etc. 
// // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the // headers Envoy will trust for core code and core extensions only. Be VERY careful making // changes to this string, especially in multi-layer Envoy deployments or deployments using // extensions which are not upstream. string header_prefix = 18; // Optional proxy version which will be used to set the value of :ref:`server.version statistic // ` if specified. Envoy will not process this value, it will be sent as is to // :ref:`stats sinks `. google.protobuf.UInt64Value stats_server_version_override = 19; // Always use TCP queries instead of UDP queries for DNS lookups. // This may be overridden on a per-cluster basis in cds_config, // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. // Setting this value causes failure if the // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during // server startup. Apple's API only uses UDP for DNS resolution. bool use_tcp_for_dns_lookups = 20; } // Administration interface :ref:`operations documentation // `. message Admin { // The path to write the access log for the administration server. If no // access log is desired specify ‘/dev/null’. This is only required if // :ref:`address ` is set. string access_log_path = 1; // The cpu profiler output path for the administration server. If no profile // path is specified, the default is ‘/var/log/envoy/envoy.prof’. string profile_path = 2; // The TCP address that the administration server will listen on. // If not specified, Envoy will not start an administration server. api.v2.core.Address address = 3; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. repeated api.v2.core.SocketOption socket_options = 4; } // Cluster manager :ref:`architecture overview `. message ClusterManager { message OutlierDetection { // Specifies the path to the outlier event log. 
string event_log_path = 1; // [#not-implemented-hide:] // The gRPC service for the outlier detection event service. // If empty, outlier detection events won't be sent to a remote endpoint. api.v2.core.EventServiceConfig event_service = 2; } // Name of the local cluster (i.e., the cluster that owns the Envoy running // this configuration). In order to enable :ref:`zone aware routing // ` this option must be set. // If *local_cluster_name* is defined then :ref:`clusters // ` must be defined in the :ref:`Bootstrap // static cluster resources // `. This is unrelated to // the :option:`--service-cluster` option which does not `affect zone aware // routing `_. string local_cluster_name = 1; // Optional global configuration for outlier detection. OutlierDetection outlier_detection = 2; // Optional configuration used to bind newly established upstream connections. // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. api.v2.core.BindConfig upstream_bind_config = 3; // A management server endpoint to stream load stats to via // *StreamLoadStats*. This must have :ref:`api_type // ` :ref:`GRPC // `. api.v2.core.ApiConfigSource load_stats_config = 4; } // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. message Watchdog { // The duration after which Envoy counts a nonresponsive thread in the // *watchdog_miss* statistic. If not specified the default is 200ms. google.protobuf.Duration miss_timeout = 1; // The duration after which Envoy counts a nonresponsive thread in the // *watchdog_mega_miss* statistic. If not specified the default is // 1000ms. google.protobuf.Duration megamiss_timeout = 2; // If a watched thread has been nonresponsive for this duration, assume a // programming error and kill the entire Envoy process. Set to 0 to disable // kill behavior. 
If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; // If at least two watched threads have been nonresponsive for at least this // duration assume a true deadlock and kill the entire Envoy process. Set to 0 // to disable this behavior. If not specified the default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; } // Runtime :ref:`configuration overview ` (deprecated). message Runtime { // The implementation assumes that the file system tree is accessed via a // symbolic link. An atomic link swap is used when a new tree should be // switched to. This parameter specifies the path to the symbolic link. Envoy // will watch the location for changes and reload the file system tree when // they happen. If this parameter is not set, there will be no disk based // runtime. string symlink_root = 1; // Specifies the subdirectory to load within the root directory. This is // useful if multiple systems share the same delivery mechanism. Envoy // configuration elements can be contained in a dedicated subdirectory. string subdirectory = 2; // Specifies an optional subdirectory to load within the root directory. If // specified and the directory exists, configuration values within this // directory will override those found in the primary subdirectory. This is // useful when Envoy is deployed across many different types of servers. // Sometimes it is useful to have a per service cluster directory for runtime // configuration. See below for exactly how the override directory is used. string override_subdirectory = 3; // Static base runtime. This will be :ref:`overridden // ` by other runtime layers, e.g. // disk or admin. This follows the :ref:`runtime protobuf JSON representation // encoding `. google.protobuf.Struct base = 4; } // [#next-free-field: 6] message RuntimeLayer { // :ref:`Disk runtime ` layer. message DiskLayer { // The implementation assumes that the file system tree is accessed via a // symbolic link. 
An atomic link swap is used when a new tree should be // switched to. This parameter specifies the path to the symbolic link. // Envoy will watch the location for changes and reload the file system tree // when they happen. See documentation on runtime :ref:`atomicity // ` for further details on how reloads are // treated. string symlink_root = 1; // Specifies the subdirectory to load within the root directory. This is // useful if multiple systems share the same delivery mechanism. Envoy // configuration elements can be contained in a dedicated subdirectory. string subdirectory = 3; // :ref:`Append ` the // service cluster to the path under symlink root. bool append_service_cluster = 2; } // :ref:`Admin console runtime ` layer. message AdminLayer { } // :ref:`Runtime Discovery Service (RTDS) ` layer. message RtdsLayer { // Resource to subscribe to at *rtds_config* for the RTDS layer. string name = 1; // RTDS configuration source. api.v2.core.ConfigSource rtds_config = 2; } // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof layer_specifier { option (validate.required) = true; // :ref:`Static runtime ` layer. // This follows the :ref:`runtime protobuf JSON representation encoding // `. Unlike static xDS resources, this static // layer is overridable by later layers in the runtime virtual filesystem. google.protobuf.Struct static_layer = 2; DiskLayer disk_layer = 3; AdminLayer admin_layer = 4; RtdsLayer rtds_layer = 5; } } // Runtime :ref:`configuration overview `. message LayeredRuntime { // The :ref:`layers ` of the runtime. This is ordered // such that later layers in the list overlay earlier entries. repeated RuntimeLayer layers = 1; } ================================================ FILE: api/envoy/config/bootstrap/v3/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/bootstrap/v2:pkg", "//envoy/config/cluster/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/listener/v3:pkg", "//envoy/config/metrics/v3:pkg", "//envoy/config/overload/v3:pkg", "//envoy/config/trace/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/config/bootstrap/v3/bootstrap.proto ================================================ syntax = "proto3"; package envoy.config.bootstrap.v3; import "envoy/config/cluster/v3/cluster.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/event_service_config.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/listener.proto"; import "envoy/config/metrics/v3/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/config/trace/v3/http_tracer.proto"; import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/core/v1/resource_locator.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.bootstrap.v3"; option java_outer_classname = "BootstrapProto"; option 
java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Bootstrap] // This proto is supplied via the :option:`-c` CLI flag and acts as the root // of the Envoy v2 configuration. See the :ref:`v2 configuration overview // ` for more detail. // Bootstrap :ref:`configuration overview `. // [#next-free-field: 28] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap"; message StaticResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap.StaticResources"; // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. repeated listener.v3.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary // to have some initial cluster definitions available to allow Envoy to know // how to speak to the management server. These cluster definitions may not // use :ref:`EDS ` (i.e. they should be static // IP or DNS-based). repeated cluster.v3.Cluster clusters = 2; // These static secrets can be used by :ref:`SdsSecretConfig // ` repeated envoy.extensions.transport_sockets.tls.v3.Secret secrets = 3; } // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Bootstrap.DynamicResources"; reserved 4; // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. core.v3.ConfigSource lds_config = 1; // Resource locator for listener collection. // [#not-implemented-hide:] udpa.core.v1.ResourceLocator lds_resources_locator = 5; // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v3.ConfigSource cds_config = 2; // Resource locator for cluster collection. 
// [#not-implemented-hide:] udpa.core.v1.ResourceLocator cds_resources_locator = 6; // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC // `. Only // :ref:`ConfigSources ` that have // the :ref:`ads ` field set will be // streamed on the ADS channel. core.v3.ApiConfigSource ads_config = 3; } reserved 10, 11; reserved "runtime"; // Node identity to present to the management server and for instance // identification purposes (e.g. in generated headers). core.v3.Node node = 1; // A list of :ref:`Node ` field names // that will be included in the context parameters of the effective // *UdpaResourceLocator* that is sent in a discovery request when resource // locators are used for LDS/CDS. Any non-string field will have its JSON // encoding set as the context parameter value, with the exception of // metadata, which will be flattened (see example below). The supported field // names are: // - "cluster" // - "id" // - "locality.region" // - "locality.sub_zone" // - "locality.zone" // - "metadata" // - "user_agent_build_version.metadata" // - "user_agent_build_version.version" // - "user_agent_name" // - "user_agent_version" // // The node context parameters act as a base layer dictionary for the context // parameters (i.e. more specific resource specific context parameters will // override). Field names will be prefixed with “udpa.node.” when included in // context parameters. // // For example, if node_context_params is ``["user_agent_name", "metadata"]``, // the implied context parameters might be:: // // node.user_agent_name: "envoy" // node.metadata.foo: "{\"bar\": \"baz\"}" // node.metadata.some: "42" // node.metadata.thing: "\"thing\"" // // [#not-implemented-hide:] repeated string node_context_params = 26; // Statically specified resources. StaticResources static_resources = 2; // xDS configuration sources. 
DynamicResources dynamic_resources = 3; // Configuration for the cluster manager which owns all upstream clusters // within the server. ClusterManager cluster_manager = 4; // Health discovery service config option. // (:ref:`core.ApiConfigSource `) core.v3.ApiConfigSource hds_config = 14; // Optional file system path to search for startup flag files. string flags_path = 5; // Optional set of stats sinks. repeated metrics.v3.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. metrics.v3.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and // gauges at a periodic interval. If not specified the default is 5000ms (5 // seconds). // Duration must be at least 1ms and at most 5 min. google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { lt {seconds: 300} gte {nanos: 1000000} }]; // Optional watchdog configuration. // This is for a single watchdog configuration for the entire system. // Deprecated in favor of *watchdogs* which has finer granularity. Watchdog watchdog = 8 [deprecated = true]; // Optional watchdogs configuration. // This is used for specifying different watchdogs for the different subsystems. Watchdogs watchdogs = 27; // Configuration for an external tracing provider. // // .. attention:: // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider // `. trace.v3.Tracing tracing = 9 [deprecated = true]; // Configuration for the runtime configuration provider. If not // specified, a “null” provider will be used which will result in all defaults // being used. LayeredRuntime layered_runtime = 17; // Configuration for the local administration HTTP server. Admin admin = 12; // Optional overload manager configuration. 
overload.v3.OverloadManager overload_manager = 15 [ (udpa.annotations.security).configure_for_untrusted_downstream = true, (udpa.annotations.security).configure_for_untrusted_upstream = true ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This // should normally be minimal overhead, but when using // :ref:`statsd `, it will send each observed value // over the wire individually because the statsd protocol doesn't have any way to represent a // histogram summary. Be aware that this can be a very large volume of data. bool enable_dispatcher_stats = 16; // Optional string which will be used in lieu of x-envoy in prefixing headers. // // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be // transformed into x-foo-retry-on etc. // // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the // headers Envoy will trust for core code and core extensions only. Be VERY careful making // changes to this string, especially in multi-layer Envoy deployments or deployments using // extensions which are not upstream. string header_prefix = 18; // Optional proxy version which will be used to set the value of :ref:`server.version statistic // ` if specified. Envoy will not process this value, it will be sent as is to // :ref:`stats sinks `. google.protobuf.UInt64Value stats_server_version_override = 19; // Always use TCP queries instead of UDP queries for DNS lookups. // This may be overridden on a per-cluster basis in cds_config, // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. // Setting this value causes failure if the // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during // server startup. Apple's API only uses UDP for DNS resolution. 
bool use_tcp_for_dns_lookups = 20; // Specifies optional bootstrap extensions to be instantiated at startup time. // Each item contains extension specific configuration. repeated core.v3.TypedExtensionConfig bootstrap_extensions = 21; // Configuration sources that will participate in // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as // follows: // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call // this *resource_authority*. // 2. *resource_authority* is compared against the authorities in any peer // *ConfigSource*. The peer *ConfigSource* is the configuration source // message which would have been used unconditionally for resolution // with opaque resource names. If there is a match with an authority, the // peer *ConfigSource* message is used. // 3. *resource_authority* is compared sequentially with the authorities in // each configuration source in *config_sources*. The first *ConfigSource* // to match wins. // 4. As a fallback, if no configuration source matches, then // *default_config_source* is used. // 5. If *default_config_source* is not specified, resolution fails. // [#not-implemented-hide:] repeated core.v3.ConfigSource config_sources = 22; // Default configuration source for *udpa.core.v1.ResourceLocator* if all // other resolution fails. // [#not-implemented-hide:] core.v3.ConfigSource default_config_source = 23; // Optional overriding of default socket interface. The value must be the name of one of the // socket interface factories initialized through a bootstrap extension string default_socket_interface = 24; // Global map of CertificateProvider instances. These instances are referred to by name in the // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name // ` // field. // [#not-implemented-hide:] map certificate_provider_instances = 25; } // Administration interface :ref:`operations documentation // `. 
message Admin { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Admin"; // The path to write the access log for the administration server. If no // access log is desired specify ‘/dev/null’. This is only required if // :ref:`address ` is set. string access_log_path = 1; // The cpu profiler output path for the administration server. If no profile // path is specified, the default is ‘/var/log/envoy/envoy.prof’. string profile_path = 2; // The TCP address that the administration server will listen on. // If not specified, Envoy will not start an administration server. core.v3.Address address = 3; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. repeated core.v3.SocketOption socket_options = 4; } // Cluster manager :ref:`architecture overview `. message ClusterManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.ClusterManager"; message OutlierDetection { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.ClusterManager.OutlierDetection"; // Specifies the path to the outlier event log. string event_log_path = 1; // [#not-implemented-hide:] // The gRPC service for the outlier detection event service. // If empty, outlier detection events won't be sent to a remote endpoint. core.v3.EventServiceConfig event_service = 2; } // Name of the local cluster (i.e., the cluster that owns the Envoy running // this configuration). In order to enable :ref:`zone aware routing // ` this option must be set. // If *local_cluster_name* is defined then :ref:`clusters // ` must be defined in the :ref:`Bootstrap // static cluster resources // `. This is unrelated to // the :option:`--service-cluster` option which does not `affect zone aware // routing `_. string local_cluster_name = 1; // Optional global configuration for outlier detection. 
OutlierDetection outlier_detection = 2; // Optional configuration used to bind newly established upstream connections. // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. core.v3.BindConfig upstream_bind_config = 3; // A management server endpoint to stream load stats to via // *StreamLoadStats*. This must have :ref:`api_type // ` :ref:`GRPC // `. core.v3.ApiConfigSource load_stats_config = 4; } // Allows you to specify different watchdog configs for different subsystems. // This allows finer tuned policies for the watchdog. If a subsystem is omitted // the default values for that system will be used. message Watchdogs { // Watchdog for the main thread. Watchdog main_thread_watchdog = 1; // Watchdog for the worker threads. Watchdog worker_watchdog = 2; } // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. // [#next-free-field: 8] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Watchdog"; message WatchdogAction { // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS. // Within an event type, actions execute in the order they are configured. // For KILL/MULTIKILL there is a default PANIC that will run after the // registered actions and kills the process if it wasn't already killed. // It might be useful to specify several debug actions, and possibly an // alternate FATAL action. enum WatchdogEvent { UNKNOWN = 0; KILL = 1; MULTIKILL = 2; MEGAMISS = 3; MISS = 4; } // Extension specific configuration for the action. core.v3.TypedExtensionConfig config = 1; WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}]; } // Register actions that will fire on given WatchDog events. // See *WatchDogAction* for priority of events. 
repeated WatchdogAction actions = 7; // The duration after which Envoy counts a nonresponsive thread in the // *watchdog_miss* statistic. If not specified the default is 200ms. google.protobuf.Duration miss_timeout = 1; // The duration after which Envoy counts a nonresponsive thread in the // *watchdog_mega_miss* statistic. If not specified the default is // 1000ms. google.protobuf.Duration megamiss_timeout = 2; // If a watched thread has been nonresponsive for this duration, assume a // programming error and kill the entire Envoy process. Set to 0 to disable // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is // enabled. Enabling this feature would help to reduce risk of synchronized // watchdog kill events across proxies due to external triggers. Set to 0 to // disable. If not specified the default is 0 (disabled). google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) // threads have been nonresponsive for at least this duration kill the entire // Envoy process. Set to 0 to disable this behavior. If not specified the // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; // Sets the threshold for *multikill_timeout* in terms of the percentage of // nonresponsive threads required for the *multikill_timeout*. // If not specified the default is 0. type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). message Runtime { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.Runtime"; // The implementation assumes that the file system tree is accessed via a // symbolic link. An atomic link swap is used when a new tree should be // switched to. This parameter specifies the path to the symbolic link. 
Envoy // will watch the location for changes and reload the file system tree when // they happen. If this parameter is not set, there will be no disk based // runtime. string symlink_root = 1; // Specifies the subdirectory to load within the root directory. This is // useful if multiple systems share the same delivery mechanism. Envoy // configuration elements can be contained in a dedicated subdirectory. string subdirectory = 2; // Specifies an optional subdirectory to load within the root directory. If // specified and the directory exists, configuration values within this // directory will override those found in the primary subdirectory. This is // useful when Envoy is deployed across many different types of servers. // Sometimes it is useful to have a per service cluster directory for runtime // configuration. See below for exactly how the override directory is used. string override_subdirectory = 3; // Static base runtime. This will be :ref:`overridden // ` by other runtime layers, e.g. // disk or admin. This follows the :ref:`runtime protobuf JSON representation // encoding `. google.protobuf.Struct base = 4; } // [#next-free-field: 6] message RuntimeLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.RuntimeLayer"; // :ref:`Disk runtime ` layer. message DiskLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.RuntimeLayer.DiskLayer"; // The implementation assumes that the file system tree is accessed via a // symbolic link. An atomic link swap is used when a new tree should be // switched to. This parameter specifies the path to the symbolic link. // Envoy will watch the location for changes and reload the file system tree // when they happen. See documentation on runtime :ref:`atomicity // ` for further details on how reloads are // treated. string symlink_root = 1; // Specifies the subdirectory to load within the root directory. 
This is // useful if multiple systems share the same delivery mechanism. Envoy // configuration elements can be contained in a dedicated subdirectory. string subdirectory = 3; // :ref:`Append ` the // service cluster to the path under symlink root. bool append_service_cluster = 2; } // :ref:`Admin console runtime ` layer. message AdminLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.RuntimeLayer.AdminLayer"; } // :ref:`Runtime Discovery Service (RTDS) ` layer. message RtdsLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.RuntimeLayer.RtdsLayer"; // Resource to subscribe to at *rtds_config* for the RTDS layer. string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // Resource locator for RTDS layer. This is mutually exclusive to *name*. // [#not-implemented-hide:] udpa.core.v1.ResourceLocator rtds_resource_locator = 3 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // RTDS configuration source. core.v3.ConfigSource rtds_config = 2; } // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. string name = 1 [(validate.rules).string = {min_len: 1}]; oneof layer_specifier { option (validate.required) = true; // :ref:`Static runtime ` layer. // This follows the :ref:`runtime protobuf JSON representation encoding // `. Unlike static xDS resources, this static // layer is overridable by later layers in the runtime virtual filesystem. google.protobuf.Struct static_layer = 2; DiskLayer disk_layer = 3; AdminLayer admin_layer = 4; RtdsLayer rtds_layer = 5; } } // Runtime :ref:`configuration overview `. message LayeredRuntime { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v2.LayeredRuntime"; // The :ref:`layers ` of the runtime. This is ordered // such that later layers in the list overlay earlier entries. 
repeated RuntimeLayer layers = 1; } ================================================ FILE: api/envoy/config/bootstrap/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/listener/v4alpha:pkg", "//envoy/config/metrics/v4alpha:pkg", "//envoy/config/overload/v3:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/config/bootstrap/v4alpha/bootstrap.proto ================================================ syntax = "proto3"; package envoy.config.bootstrap.v4alpha; import "envoy/config/cluster/v4alpha/cluster.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/event_service_config.proto"; import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; import "envoy/config/listener/v4alpha/listener.proto"; import "envoy/config/metrics/v4alpha/stats.proto"; import "envoy/config/overload/v3/overload.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/core/v1/resource_locator.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; 
import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.bootstrap.v4alpha"; option java_outer_classname = "BootstrapProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Bootstrap] // This proto is supplied via the :option:`-c` CLI flag and acts as the root // of the Envoy v2 configuration. See the :ref:`v2 configuration overview // ` for more detail. // Bootstrap :ref:`configuration overview `. // [#next-free-field: 28] message Bootstrap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap"; message StaticResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.StaticResources"; // Static :ref:`Listeners `. These listeners are // available regardless of LDS configuration. repeated listener.v4alpha.Listener listeners = 1; // If a network based configuration source is specified for :ref:`cds_config // `, it's necessary // to have some initial cluster definitions available to allow Envoy to know // how to speak to the management server. These cluster definitions may not // use :ref:`EDS ` (i.e. they should be static // IP or DNS-based). repeated cluster.v4alpha.Cluster clusters = 2; // These static secrets can be used by :ref:`SdsSecretConfig // ` repeated envoy.extensions.transport_sockets.tls.v4alpha.Secret secrets = 3; } // [#next-free-field: 7] message DynamicResources { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Bootstrap.DynamicResources"; reserved 4; // All :ref:`Listeners ` are provided by a single // :ref:`LDS ` configuration source. core.v4alpha.ConfigSource lds_config = 1; // Resource locator for listener collection. 
// [#not-implemented-hide:] udpa.core.v1.ResourceLocator lds_resources_locator = 5; // All post-bootstrap :ref:`Cluster ` definitions are // provided by a single :ref:`CDS ` // configuration source. core.v4alpha.ConfigSource cds_config = 2; // Resource locator for cluster collection. // [#not-implemented-hide:] udpa.core.v1.ResourceLocator cds_resources_locator = 6; // A single :ref:`ADS ` source may be optionally // specified. This must have :ref:`api_type // ` :ref:`GRPC // `. Only // :ref:`ConfigSources ` that have // the :ref:`ads ` field set will be // streamed on the ADS channel. core.v4alpha.ApiConfigSource ads_config = 3; } reserved 10, 11, 8, 9; reserved "runtime", "watchdog", "tracing"; // Node identity to present to the management server and for instance // identification purposes (e.g. in generated headers). core.v4alpha.Node node = 1; // A list of :ref:`Node ` field names // that will be included in the context parameters of the effective // *UdpaResourceLocator* that is sent in a discovery request when resource // locators are used for LDS/CDS. Any non-string field will have its JSON // encoding set as the context parameter value, with the exception of // metadata, which will be flattened (see example below). The supported field // names are: // - "cluster" // - "id" // - "locality.region" // - "locality.sub_zone" // - "locality.zone" // - "metadata" // - "user_agent_build_version.metadata" // - "user_agent_build_version.version" // - "user_agent_name" // - "user_agent_version" // // The node context parameters act as a base layer dictionary for the context // parameters (i.e. more specific resource specific context parameters will // override). Field names will be prefixed with “udpa.node.” when included in // context parameters. 
// // For example, if node_context_params is ``["user_agent_name", "metadata"]``, // the implied context parameters might be:: // // node.user_agent_name: "envoy" // node.metadata.foo: "{\"bar\": \"baz\"}" // node.metadata.some: "42" // node.metadata.thing: "\"thing\"" // // [#not-implemented-hide:] repeated string node_context_params = 26; // Statically specified resources. StaticResources static_resources = 2; // xDS configuration sources. DynamicResources dynamic_resources = 3; // Configuration for the cluster manager which owns all upstream clusters // within the server. ClusterManager cluster_manager = 4; // Health discovery service config option. // (:ref:`core.ApiConfigSource `) core.v4alpha.ApiConfigSource hds_config = 14; // Optional file system path to search for startup flag files. string flags_path = 5; // Optional set of stats sinks. repeated metrics.v4alpha.StatsSink stats_sinks = 6; // Configuration for internal processing of stats. metrics.v4alpha.StatsConfig stats_config = 13; // Optional duration between flushes to configured stats sinks. For // performance reasons Envoy latches counters and only flushes counters and // gauges at a periodic interval. If not specified the default is 5000ms (5 // seconds). // Duration must be at least 1ms and at most 5 min. google.protobuf.Duration stats_flush_interval = 7 [(validate.rules).duration = { lt {seconds: 300} gte {nanos: 1000000} }]; // Optional watchdogs configuration. // This is used for specifying different watchdogs for the different subsystems. Watchdogs watchdogs = 27; // Configuration for the runtime configuration provider. If not // specified, a “null” provider will be used which will result in all defaults // being used. LayeredRuntime layered_runtime = 17; // Configuration for the local administration HTTP server. Admin admin = 12; // Optional overload manager configuration. 
overload.v3.OverloadManager overload_manager = 15 [ (udpa.annotations.security).configure_for_untrusted_downstream = true, (udpa.annotations.security).configure_for_untrusted_upstream = true ]; // Enable :ref:`stats for event dispatcher `, defaults to false. // Note that this records a value for each iteration of the event loop on every thread. This // should normally be minimal overhead, but when using // :ref:`statsd `, it will send each observed value // over the wire individually because the statsd protocol doesn't have any way to represent a // histogram summary. Be aware that this can be a very large volume of data. bool enable_dispatcher_stats = 16; // Optional string which will be used in lieu of x-envoy in prefixing headers. // // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be // transformed into x-foo-retry-on etc. // // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the // headers Envoy will trust for core code and core extensions only. Be VERY careful making // changes to this string, especially in multi-layer Envoy deployments or deployments using // extensions which are not upstream. string header_prefix = 18; // Optional proxy version which will be used to set the value of :ref:`server.version statistic // ` if specified. Envoy will not process this value, it will be sent as is to // :ref:`stats sinks `. google.protobuf.UInt64Value stats_server_version_override = 19; // Always use TCP queries instead of UDP queries for DNS lookups. // This may be overridden on a per-cluster basis in cds_config, // when :ref:`dns_resolvers ` and // :ref:`use_tcp_for_dns_lookups ` are // specified. // Setting this value causes failure if the // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during // server startup. Apple' API only uses UDP for DNS resolution. 
bool use_tcp_for_dns_lookups = 20; // Specifies optional bootstrap extensions to be instantiated at startup time. // Each item contains extension specific configuration. repeated core.v4alpha.TypedExtensionConfig bootstrap_extensions = 21; // Configuration sources that will participate in // *udpa.core.v1.ResourceLocator* authority resolution. The algorithm is as // follows: // 1. The authority field is taken from the *udpa.core.v1.ResourceLocator*, call // this *resource_authority*. // 2. *resource_authority* is compared against the authorities in any peer // *ConfigSource*. The peer *ConfigSource* is the configuration source // message which would have been used unconditionally for resolution // with opaque resource names. If there is a match with an authority, the // peer *ConfigSource* message is used. // 3. *resource_authority* is compared sequentially with the authorities in // each configuration source in *config_sources*. The first *ConfigSource* // to match wins. // 4. As a fallback, if no configuration source matches, then // *default_config_source* is used. // 5. If *default_config_source* is not specified, resolution fails. // [#not-implemented-hide:] repeated core.v4alpha.ConfigSource config_sources = 22; // Default configuration source for *udpa.core.v1.ResourceLocator* if all // other resolution fails. // [#not-implemented-hide:] core.v4alpha.ConfigSource default_config_source = 23; // Optional overriding of default socket interface. The value must be the name of one of the // socket interface factories initialized through a bootstrap extension string default_socket_interface = 24; // Global map of CertificateProvider instances. These instances are referred to by name in the // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name // ` // field. // [#not-implemented-hide:] map certificate_provider_instances = 25; } // Administration interface :ref:`operations documentation // `. 
message Admin { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Admin"; // The path to write the access log for the administration server. If no // access log is desired specify ‘/dev/null’. This is only required if // :ref:`address ` is set. string access_log_path = 1; // The cpu profiler output path for the administration server. If no profile // path is specified, the default is ‘/var/log/envoy/envoy.prof’. string profile_path = 2; // The TCP address that the administration server will listen on. // If not specified, Envoy will not start an administration server. core.v4alpha.Address address = 3; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. repeated core.v4alpha.SocketOption socket_options = 4; } // Cluster manager :ref:`architecture overview `. message ClusterManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.ClusterManager"; message OutlierDetection { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.ClusterManager.OutlierDetection"; // Specifies the path to the outlier event log. string event_log_path = 1; // [#not-implemented-hide:] // The gRPC service for the outlier detection event service. // If empty, outlier detection events won't be sent to a remote endpoint. core.v4alpha.EventServiceConfig event_service = 2; } // Name of the local cluster (i.e., the cluster that owns the Envoy running // this configuration). In order to enable :ref:`zone aware routing // ` this option must be set. // If *local_cluster_name* is defined then :ref:`clusters // ` must be defined in the :ref:`Bootstrap // static cluster resources // `. This is unrelated to // the :option:`--service-cluster` option which does not `affect zone aware // routing `_. string local_cluster_name = 1; // Optional global configuration for outlier detection. 
OutlierDetection outlier_detection = 2; // Optional configuration used to bind newly established upstream connections. // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config. core.v4alpha.BindConfig upstream_bind_config = 3; // A management server endpoint to stream load stats to via // *StreamLoadStats*. This must have :ref:`api_type // ` :ref:`GRPC // `. core.v4alpha.ApiConfigSource load_stats_config = 4; } // Allows you to specify different watchdog configs for different subsystems. // This allows finer tuned policies for the watchdog. If a subsystem is omitted // the default values for that system will be used. message Watchdogs { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdogs"; // Watchdog for the main thread. Watchdog main_thread_watchdog = 1; // Watchdog for the worker threads. Watchdog worker_watchdog = 2; } // Envoy process watchdog configuration. When configured, this monitors for // nonresponsive threads and kills the process after the configured thresholds. // See the :ref:`watchdog documentation ` for more information. // [#next-free-field: 8] message Watchdog { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog"; message WatchdogAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Watchdog.WatchdogAction"; // The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS. // Within an event type, actions execute in the order they are configured. // For KILL/MULTIKILL there is a default PANIC that will run after the // registered actions and kills the process if it wasn't already killed. // It might be useful to specify several debug actions, and possibly an // alternate FATAL action. enum WatchdogEvent { UNKNOWN = 0; KILL = 1; MULTIKILL = 2; MEGAMISS = 3; MISS = 4; } // Extension specific configuration for the action. 
core.v4alpha.TypedExtensionConfig config = 1; WatchdogEvent event = 2 [(validate.rules).enum = {defined_only: true}]; } // Register actions that will fire on given WatchDog events. // See *WatchDogAction* for priority of events. repeated WatchdogAction actions = 7; // The duration after which Envoy counts a nonresponsive thread in the // *watchdog_miss* statistic. If not specified the default is 200ms. google.protobuf.Duration miss_timeout = 1; // The duration after which Envoy counts a nonresponsive thread in the // *watchdog_mega_miss* statistic. If not specified the default is // 1000ms. google.protobuf.Duration megamiss_timeout = 2; // If a watched thread has been nonresponsive for this duration, assume a // programming error and kill the entire Envoy process. Set to 0 to disable // kill behavior. If not specified the default is 0 (disabled). google.protobuf.Duration kill_timeout = 3; // Defines the maximum jitter used to adjust the *kill_timeout* if *kill_timeout* is // enabled. Enabling this feature would help to reduce risk of synchronized // watchdog kill events across proxies due to external triggers. Set to 0 to // disable. If not specified the default is 0 (disabled). google.protobuf.Duration max_kill_timeout_jitter = 6 [(validate.rules).duration = {gte {}}]; // If max(2, ceil(registered_threads * Fraction(*multikill_threshold*))) // threads have been nonresponsive for at least this duration kill the entire // Envoy process. Set to 0 to disable this behavior. If not specified the // default is 0 (disabled). google.protobuf.Duration multikill_timeout = 4; // Sets the threshold for *multikill_timeout* in terms of the percentage of // nonresponsive threads required for the *multikill_timeout*. // If not specified the default is 0. type.v3.Percent multikill_threshold = 5; } // Runtime :ref:`configuration overview ` (deprecated). 
message Runtime { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.Runtime"; // The implementation assumes that the file system tree is accessed via a // symbolic link. An atomic link swap is used when a new tree should be // switched to. This parameter specifies the path to the symbolic link. Envoy // will watch the location for changes and reload the file system tree when // they happen. If this parameter is not set, there will be no disk based // runtime. string symlink_root = 1; // Specifies the subdirectory to load within the root directory. This is // useful if multiple systems share the same delivery mechanism. Envoy // configuration elements can be contained in a dedicated subdirectory. string subdirectory = 2; // Specifies an optional subdirectory to load within the root directory. If // specified and the directory exists, configuration values within this // directory will override those found in the primary subdirectory. This is // useful when Envoy is deployed across many different types of servers. // Sometimes it is useful to have a per service cluster directory for runtime // configuration. See below for exactly how the override directory is used. string override_subdirectory = 3; // Static base runtime. This will be :ref:`overridden // ` by other runtime layers, e.g. // disk or admin. This follows the :ref:`runtime protobuf JSON representation // encoding `. google.protobuf.Struct base = 4; } // [#next-free-field: 6] message RuntimeLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.RuntimeLayer"; // :ref:`Disk runtime ` layer. message DiskLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer"; // The implementation assumes that the file system tree is accessed via a // symbolic link. An atomic link swap is used when a new tree should be // switched to. 
This parameter specifies the path to the symbolic link. // Envoy will watch the location for changes and reload the file system tree // when they happen. See documentation on runtime :ref:`atomicity // ` for further details on how reloads are // treated. string symlink_root = 1; // Specifies the subdirectory to load within the root directory. This is // useful if multiple systems share the same delivery mechanism. Envoy // configuration elements can be contained in a dedicated subdirectory. string subdirectory = 3; // :ref:`Append ` the // service cluster to the path under symlink root. bool append_service_cluster = 2; } // :ref:`Admin console runtime ` layer. message AdminLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer"; } // :ref:`Runtime Discovery Service (RTDS) ` layer. message RtdsLayer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer"; oneof name_specifier { // Resource to subscribe to at *rtds_config* for the RTDS layer. string name = 1; // Resource locator for RTDS layer. This is mutually exclusive to *name*. // [#not-implemented-hide:] udpa.core.v1.ResourceLocator rtds_resource_locator = 3; } // RTDS configuration source. core.v4alpha.ConfigSource rtds_config = 2; } // Descriptive name for the runtime layer. This is only used for the runtime // :http:get:`/runtime` output. string name = 1 [(validate.rules).string = {min_len: 1}]; oneof layer_specifier { option (validate.required) = true; // :ref:`Static runtime ` layer. // This follows the :ref:`runtime protobuf JSON representation encoding // `. Unlike static xDS resources, this static // layer is overridable by later layers in the runtime virtual filesystem. google.protobuf.Struct static_layer = 2; DiskLayer disk_layer = 3; AdminLayer admin_layer = 4; RtdsLayer rtds_layer = 5; } } // Runtime :ref:`configuration overview `. 
message LayeredRuntime { option (udpa.annotations.versioning).previous_message_type = "envoy.config.bootstrap.v3.LayeredRuntime"; // The :ref:`layers ` of the runtime. This is ordered // such that later layers in the list overlay earlier entries. repeated RuntimeLayer layers = 1; } ================================================ FILE: api/envoy/config/cluster/aggregate/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/cluster/aggregate/v2alpha/cluster.proto ================================================ syntax = "proto3"; package envoy.config.cluster.aggregate.v2alpha; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.aggregate.v2alpha"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.clusters.aggregate.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Aggregate cluster configuration] // Configuration for the aggregate cluster. See the :ref:`architecture overview // ` for more information. // [#extension: envoy.clusters.aggregate] message ClusterConfig { // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they // appear in this list. repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/cluster/dynamic_forward_proxy/v2alpha/cluster.proto ================================================ syntax = "proto3"; package envoy.config.cluster.dynamic_forward_proxy.v2alpha; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.dynamic_forward_proxy.v2alpha"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.clusters.dynamic_forward_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamic forward proxy cluster configuration] // Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview // ` for more information. // [#extension: envoy.clusters.dynamic_forward_proxy] message ClusterConfig { // The DNS cache configuration that the cluster will attach to. Note this configuration must // match that of associated :ref:`dynamic forward proxy HTTP filter configuration // `. common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/cluster/redis/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/cluster/redis/redis_cluster.proto ================================================ syntax = "proto3"; package envoy.config.cluster.redis; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.redis"; option java_outer_classname = "RedisClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Redis Cluster Configuration] // This cluster adds support for `Redis Cluster `_, as part // of :ref:`Envoy's support for Redis Cluster `. // // Redis Cluster is an extension of Redis which supports sharding and high availability (where a // shard that loses its primary fails over to a replica, and designates it as the new primary). // However, as there is no unified frontend or proxy service in front of Redis Cluster, the client // (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the // topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS // command `_. This result is then stored locally, and // updated at user-configured intervals. // // Additionally, if // :ref:`enable_redirection` // is true, then moved and ask redirection errors from upstream servers will trigger a topology // refresh when they exceed a user-configured error threshold. // // Example: // // .. 
code-block:: yaml // // name: name // connect_timeout: 0.25s // dns_lookup_family: V4_ONLY // hosts: // - socket_address: // address: foo.bar.com // port_value: 22120 // cluster_type: // name: envoy.clusters.redis // typed_config: // "@type": type.googleapis.com/google.protobuf.Struct // value: // cluster_refresh_rate: 30s // cluster_refresh_timeout: 0.5s // redirect_refresh_interval: 10s // redirect_refresh_threshold: 10 // [#extension: envoy.clusters.redis] // [#next-free-field: 7] message RedisClusterConfig { // Interval between successive topology refresh requests. If not set, this defaults to 5s. google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}]; // Timeout for topology refresh request. If not set, this defaults to 3s. google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}]; // The minimum interval that must pass after triggering a topology refresh request before a new // request can possibly be triggered again. Any errors received during one of these // time intervals are ignored. If not set, this defaults to 5s. google.protobuf.Duration redirect_refresh_interval = 3; // The number of redirection errors that must be received before // triggering a topology refresh request. If not set, this defaults to 5. // If this is set to 0, topology refresh after redirect is disabled. google.protobuf.UInt32Value redirect_refresh_threshold = 4; // The number of failures that must be received before triggering a topology refresh request. // If not set, this defaults to 0, which disables the topology refresh due to failure. uint32 failure_refresh_threshold = 5; // The number of hosts became degraded or unhealthy before triggering a topology refresh request. // If not set, this defaults to 0, which disables the topology refresh due to degraded or // unhealthy host. 
uint32 host_degraded_refresh_threshold = 6; } ================================================ FILE: api/envoy/config/cluster/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2:pkg", "//envoy/api/v2/cluster:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/config/cluster/v3/circuit_breaker.proto ================================================ syntax = "proto3"; package envoy.config.cluster.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "CircuitBreakerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Circuit breakers] // :ref:`Circuit breaking` settings can be // specified individually for each defined priority. message CircuitBreakers { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.CircuitBreakers"; // A Thresholds defines CircuitBreaker settings for a // :ref:`RoutingPriority`. 
// [#next-free-field: 9] message Thresholds { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.CircuitBreakers.Thresholds"; message RetryBudget { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.CircuitBreakers.Thresholds.RetryBudget"; // Specifies the limit on concurrent retries as a percentage of the sum of active requests and // active pending requests. For example, if there are 100 active requests and the // budget_percent is set to 25, there may be 25 active retries. // // This parameter is optional. Defaults to 20%. type.v3.Percent budget_percent = 1; // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the // number of active retries may never go below this number. // // This parameter is optional. Defaults to 3. google.protobuf.UInt32Value min_retry_concurrency = 2; } // The :ref:`RoutingPriority` // the specified CircuitBreaker settings apply to. core.v3.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}]; // The maximum number of connections that Envoy will make to the upstream // cluster. If not specified, the default is 1024. google.protobuf.UInt32Value max_connections = 2; // The maximum number of pending requests that Envoy will allow to the // upstream cluster. If not specified, the default is 1024. google.protobuf.UInt32Value max_pending_requests = 3; // The maximum number of parallel requests that Envoy will make to the // upstream cluster. If not specified, the default is 1024. google.protobuf.UInt32Value max_requests = 4; // The maximum number of parallel retries that Envoy will allow to the // upstream cluster. If not specified, the default is 3. google.protobuf.UInt32Value max_retries = 5; // Specifies a limit on concurrent retries in relation to the number of active requests. This // parameter is optional. // // .. note:: // // If this field is set, the retry budget will override any configured retry circuit // breaker. 
RetryBudget retry_budget = 8; // If track_remaining is true, then stats will be published that expose // the number of resources remaining until the circuit breakers open. If // not specified, the default is false. // // .. note:: // // If a retry budget is used in lieu of the max_retries circuit breaker, // the remaining retry resources remaining will not be tracked. bool track_remaining = 6; // The maximum number of connection pools per cluster that Envoy will concurrently support at // once. If not specified, the default is unlimited. Set this for clusters which create a // large number of connection pools. See // :ref:`Circuit Breaking ` for // more details. google.protobuf.UInt32Value max_connection_pools = 7; } // If multiple :ref:`Thresholds` // are defined with the same :ref:`RoutingPriority`, // the first one in the list is used. If no Thresholds is defined for a given // :ref:`RoutingPriority`, the default values // are used. repeated Thresholds thresholds = 1; } ================================================ FILE: api/envoy/config/cluster/v3/cluster.proto ================================================ syntax = "proto3"; package envoy.config.cluster.v3; import "envoy/config/cluster/v3/circuit_breaker.proto"; import "envoy/config/cluster/v3/filter.proto"; import "envoy/config/cluster/v3/outlier_detection.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/health_check.proto"; import "envoy/config/core/v3/protocol.proto"; import "envoy/config/endpoint/v3/endpoint.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/core/v1/collection_entry.proto"; import "udpa/core/v1/resource_locator.proto"; import 
"envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v3"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Cluster configuration] // Cluster list collections. Entries are *Cluster* resources or references. // [#not-implemented-hide:] message ClusterCollection { udpa.core.v1.CollectionEntry entries = 1; } // Configuration for a single upstream cluster. // [#next-free-field: 53] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster"; // Refer to :ref:`service discovery type ` // for an explanation on each type. enum DiscoveryType { // Refer to the :ref:`static discovery type` // for an explanation. STATIC = 0; // Refer to the :ref:`strict DNS discovery // type` // for an explanation. STRICT_DNS = 1; // Refer to the :ref:`logical DNS discovery // type` // for an explanation. LOGICAL_DNS = 2; // Refer to the :ref:`service discovery type` // for an explanation. EDS = 3; // Refer to the :ref:`original destination discovery // type` // for an explanation. ORIGINAL_DST = 4; } // Refer to :ref:`load balancer type ` architecture // overview section for information on each type. enum LbPolicy { reserved 4; reserved "ORIGINAL_DST_LB"; // Refer to the :ref:`round robin load balancing // policy` // for an explanation. ROUND_ROBIN = 0; // Refer to the :ref:`least request load balancing // policy` // for an explanation. LEAST_REQUEST = 1; // Refer to the :ref:`ring hash load balancing // policy` // for an explanation. RING_HASH = 2; // Refer to the :ref:`random load balancing // policy` // for an explanation. 
RANDOM = 3; // Refer to the :ref:`Maglev load balancing policy` // for an explanation. MAGLEV = 5; // This load balancer type must be specified if the configured cluster provides a cluster // specific load balancer. Consult the configured cluster's documentation for whether to set // this option or not. CLUSTER_PROVIDED = 6; // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy // ` field to determine the LB policy. // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field // and instead using the new load_balancing_policy field as the one and only mechanism for // configuring this.] LOAD_BALANCING_POLICY_CONFIG = 7; } // When V4_ONLY is selected, the DNS resolver will only perform a lookup for // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will // only perform a lookup for addresses in the IPv6 family. If AUTO is // specified, the DNS resolver will first perform a lookup for addresses in // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. // For cluster types other than // :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS`, // this setting is // ignored. enum DnsLookupFamily { AUTO = 0; V4_ONLY = 1; V6_ONLY = 2; } enum ClusterProtocolSelection { // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). // If :ref:`http2_protocol_options ` are // present, HTTP2 will be used, otherwise HTTP1.1 will be used. USE_CONFIGURED_PROTOCOL = 0; // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. USE_DOWNSTREAM_PROTOCOL = 1; } // TransportSocketMatch specifies what transport socket config will be used // when the match conditions are satisfied. message TransportSocketMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.TransportSocketMatch"; // The name of the match, used in stats generation. 
string name = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria. // The connection to the endpoint with metadata matching what is set in this field // will use the transport socket configuration specified here. // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match // against the values specified in this field. google.protobuf.Struct match = 2; // The configuration of the transport socket. core.v3.TransportSocket transport_socket = 3; } // Extended cluster type. message CustomClusterType { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.CustomClusterType"; // The type of the cluster to instantiate. The name must match a supported cluster type. string name = 1 [(validate.rules).string = {min_len: 1}]; // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. google.protobuf.Any typed_config = 2; } // Only valid when discovery type is EDS. message EdsClusterConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.EdsClusterConfig"; // Configuration for the source of EDS updates for this Cluster. core.v3.ConfigSource eds_config = 1; // Optional alternative to cluster name to present to EDS. This does not // have the same restrictions as cluster name, i.e. it may be arbitrary // length. string service_name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // Resource locator for EDS. This is mutually exclusive to *service_name*. // [#not-implemented-hide:] udpa.core.v1.ResourceLocator eds_resource_locator = 3 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; } // Optionally divide the endpoints in this cluster into subsets defined by // endpoint metadata and selected by route and weighted cluster metadata. 
// [#next-free-field: 8]
message LbSubsetConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.Cluster.LbSubsetConfig";

  // If NO_FALLBACK is selected, a result
  // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected,
  // any cluster endpoint may be returned (subject to policy, health checks,
  // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the
  // endpoints matching the values from the default_subset field.
  enum LbSubsetFallbackPolicy {
    NO_FALLBACK = 0;
    ANY_ENDPOINT = 1;
    DEFAULT_SUBSET = 2;
  }

  // Specifications for subsets.
  message LbSubsetSelector {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.Cluster.LbSubsetConfig.LbSubsetSelector";

    // Allows to override top level fallback policy per selector.
    enum LbSubsetSelectorFallbackPolicy {
      // If NOT_DEFINED top level config fallback policy is used instead.
      NOT_DEFINED = 0;

      // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.
      NO_FALLBACK = 1;

      // If ANY_ENDPOINT is selected, any cluster endpoint may be returned
      // (subject to policy, health checks, etc).
      ANY_ENDPOINT = 2;

      // If DEFAULT_SUBSET is selected, load balancing is performed over the
      // endpoints matching the values from the default_subset field.
      DEFAULT_SUBSET = 3;

      // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata
      // keys reduced to
      // :ref:`fallback_keys_subset`.
      // It allows for a fallback to a different, less specific selector if some of the keys of
      // the selector are considered optional.
      KEYS_SUBSET = 4;
    }

    // List of keys to match with the weighted cluster metadata.
    repeated string keys = 1;

    // Selects a mode of operation in which each subset has only one host. This mode uses the same
    // rules for choosing a host, but updating hosts is faster, especially for large numbers of
    // hosts.
    //
    // If a match is found to a host, that host will be used regardless of priority levels, unless
    // the host is unhealthy.
    //
    // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys`
    // contains only one entry.
    //
    // When this mode is enabled, configurations that contain more than one host with the same
    // metadata value for the single key in `keys` will use only one of the hosts with the given
    // key; no requests will be routed to the others. The cluster gauge
    // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are
    // present in the current configuration.
    bool single_host_per_subset = 4;

    // The behavior used when no endpoint subset matches the selected route's
    // metadata.
    LbSubsetSelectorFallbackPolicy fallback_policy = 2
        [(validate.rules).enum = {defined_only: true}];

    // Subset of
    // :ref:`keys` used by
    // :ref:`KEYS_SUBSET`
    // fallback policy.
    // It has to be a non empty list if KEYS_SUBSET fallback policy is selected.
    // For any other fallback policy the parameter is not used and should not be set.
    // Only values also present in
    // :ref:`keys` are allowed, but
    // `fallback_keys_subset` cannot be equal to `keys`.
    repeated string fallback_keys_subset = 3;
  }

  // The behavior used when no endpoint subset matches the selected route's
  // metadata. The value defaults to
  // :ref:`NO_FALLBACK`.
  LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];

  // Specifies the default subset of endpoints used during fallback if
  // fallback_policy is
  // :ref:`DEFAULT_SUBSET`.
  // Each field in default_subset is
  // compared to the matching LbEndpoint.Metadata under the *envoy.lb*
  // namespace. It is valid for no hosts to match, in which case the behavior
  // is the same as a fallback_policy of
  // :ref:`NO_FALLBACK`.
  google.protobuf.Struct default_subset = 2;

  // For each entry, LbEndpoint.Metadata's
  // *envoy.lb* namespace is traversed and a subset is created for each unique
  // combination of key and value. For example:
  //
  // .. code-block:: json
  //
  //   { "subset_selectors": [
  //       { "keys": [ "version" ] },
  //       { "keys": [ "stage", "hardware_type" ] }
  //   ]}
  //
  // A subset is matched when the metadata from the selected route and
  // weighted cluster contains the same keys and values as the subset's
  // metadata. The same host may appear in multiple subsets.
  repeated LbSubsetSelector subset_selectors = 3;

  // If true, routing to subsets will take into account the localities and locality weights of the
  // endpoints when making the routing decision.
  //
  // There are some potential pitfalls associated with enabling this feature, as the resulting
  // traffic split after applying both a subset match and locality weights might be undesirable.
  //
  // Consider for example a situation in which you have 50/50 split across two localities X/Y
  // which have 100 hosts each without subsetting. If the subset LB results in X having only 1
  // host selected but Y having 100, then a lot more load is being dumped on the single host in X
  // than originally anticipated in the load balancing assignment delivered via EDS.
  bool locality_weight_aware = 4;

  // When used with locality_weight_aware, scales the weight of each locality by the ratio
  // of hosts in the subset vs hosts in the original subset. This aims to even out the load
  // going to an individual locality if said locality is disproportionately affected by the
  // subset predicate.
  bool scale_locality_weight = 5;

  // If true, when a fallback policy is configured and its corresponding subset fails to find
  // a host this will cause any host to be selected instead.
  //
  // This is useful when using the default subset as the fallback policy, given the default
  // subset might become empty. With this option enabled, if that happens the LB will attempt
  // to select a host from the entire cluster.
  bool panic_mode_any = 6;

  // If true, metadata specified for a metadata key will be matched against the corresponding
  // endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value
  // and any of the elements in the list matches the criteria.
  bool list_as_any = 7;
}

// Specific configuration for the LeastRequest load balancing policy.
message LeastRequestLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.Cluster.LeastRequestLbConfig";

  // The number of random healthy hosts from which the host with the fewest active requests will
  // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.
  google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];

  // The following formula is used to calculate the dynamic weights when hosts have different load
  // balancing weights:
  //
  // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`
  //
  // The larger the active request bias is, the more aggressively active requests will lower the
  // effective weight when all host weights are not equal.
  //
  // `active_request_bias` must be greater than or equal to 0.0.
  //
  // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number
  // of active requests at the time it picks a host and behaves like the Round Robin Load
  // Balancer.
  //
  // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing
  // weight by the number of active requests at the time it does a pick.
  //
  // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's
  // host sets changes, e.g., whenever there is a host membership update or a host load balancing
  // weight change.
  //
  // .. note::
  //   This setting only takes effect if all host weights are not equal.
  core.v3.RuntimeDouble active_request_bias = 2;
}

// Specific configuration for the :ref:`RingHash`
// load balancing policy.
message RingHashLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.Cluster.RingHashLbConfig";

  // The hash function used to hash hosts onto the ketama ring.
  enum HashFunction {
    // Use `xxHash `_, this is the default hash function.
    XX_HASH = 0;

    // Use `MurmurHash2 `_, this is compatible with
    // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
    // on Linux and not macOS.
    MURMUR_HASH_2 = 1;
  }

  reserved 2;

  // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each
  // provided host) the better the request distribution will reflect the desired weights. Defaults
  // to 1024 entries, and limited to 8M entries. See also
  // :ref:`maximum_ring_size`.
  google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];

  // The hash function used to hash hosts onto the ketama ring. The value defaults to
  // :ref:`XX_HASH`.
  HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];

  // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered
  // to further constrain resource use. See also
  // :ref:`minimum_ring_size`.
  google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];
}

// Specific configuration for the :ref:`Maglev`
// load balancing policy.
message MaglevLbConfig {
  // The table size for Maglev hashing. The Maglev aims for 'minimal disruption' rather than an
  // absolute guarantee. Minimal disruption means that when the set of upstreams changes, a
  // connection will likely be sent to the same upstream as it was before. Increasing the table
  // size reduces the amount of disruption.
  // The table size must be prime number. If it is not specified, the default is 65537.
  google.protobuf.UInt64Value table_size = 1;
}

// Specific configuration for the
// :ref:`Original Destination `
// load balancing policy.
message OriginalDstLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.Cluster.OriginalDstLbConfig";

  // When true, :ref:`x-envoy-original-dst-host
  // ` can be used to override destination
  // address.
  //
  // .. attention::
  //
  //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to
  //   route traffic to arbitrary hosts and/or ports, which may have serious security
  //   consequences.
  bool use_http_header = 1;
}

// Common configuration for all load balancer implementations.
// [#next-free-field: 8]
message CommonLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.Cluster.CommonLbConfig";

  // Configuration for :ref:`zone aware routing
  // `.
  message ZoneAwareLbConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.Cluster.CommonLbConfig.ZoneAwareLbConfig";

    // Configures percentage of requests that will be considered for zone aware routing
    // if zone aware routing is configured. If not specified, the default is 100%.
    // * :ref:`runtime values `.
    // * :ref:`Zone aware routing support `.
    type.v3.Percent routing_enabled = 1;

    // Configures minimum upstream cluster size required for zone aware routing
    // If upstream cluster size is less than specified, zone aware routing is not performed
    // even if zone aware routing is configured. If not specified, the default is 6.
    // * :ref:`runtime values `.
    // * :ref:`Zone aware routing support `.
    google.protobuf.UInt64Value min_cluster_size = 2;

    // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic
    // mode`. Instead, the cluster will fail all
    // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a
    // failing service.
    bool fail_traffic_on_panic = 3;
  }

  // Configuration for :ref:`locality weighted load balancing
  // `
  message LocalityWeightedLbConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.Cluster.CommonLbConfig.LocalityWeightedLbConfig";
  }

  // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
  message ConsistentHashingLbConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.Cluster.CommonLbConfig.ConsistentHashingLbConfig";

    // If set to `true`, the cluster will use hostname instead of the resolved
    // address as the key to consistently hash to an upstream host. Only valid for StrictDNS
    // clusters with hostnames which resolve to a single IP address.
    bool use_hostname_for_hashing = 1;

    // Configures percentage of average cluster load to bound per upstream host. For example,
    // with a value of 150 no upstream host will get a load more than 1.5 times the average load
    // of all the hosts in the cluster. If not specified, the load is not bounded for any
    // upstream host. Typical value for this parameter is between 120 and 200.
    // Minimum is 100.
    //
    // Applies to both Ring Hash and Maglev load balancers.
    //
    // This is implemented based on the method described in the paper
    // https://arxiv.org/abs/1608.01350. For the specified `hash_balance_factor`, requests to any
    // upstream host are capped at `hash_balance_factor/100` times the average number of requests
    // across the cluster. When a request arrives for an upstream host that is currently serving
    // at its max capacity, linear probing is used to identify an eligible host. Further, the
    // linear probe is implemented using a random jump in hosts ring/table to identify the
    // eligible host (this technique is as described in the paper
    // https://arxiv.org/abs/1908.08762 - the random jump avoids the cascading overflow effect
    // when choosing the next host in the ring/table).
    //
    // If weights are specified on the hosts, they are respected.
    //
    // This is an O(N) algorithm, unlike other load balancers. Using a lower
    // `hash_balance_factor` results in more hosts being probed, so use a higher value if you
    // require better performance.
    google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}];
  }

  // Configures the :ref:`healthy panic threshold `.
  // If not specified, the default is 50%.
  // To disable panic mode, set to 0%.
  //
  // .. note::
  //   The specified percent will be truncated to the nearest 1%.
  type.v3.Percent healthy_panic_threshold = 1;

  oneof locality_config_specifier {
    ZoneAwareLbConfig zone_aware_lb_config = 2;

    LocalityWeightedLbConfig locality_weighted_lb_config = 3;
  }

  // If set, all health check/weight/metadata updates that happen within this duration will be
  // merged and delivered in one shot when the duration expires. The start of the duration is when
  // the first update happens. This is useful for big clusters, with potentially noisy deploys
  // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes
  // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new
  // cluster). Please always keep in mind that the use of sandbox technologies may change this
  // behavior.
  //
  // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge
  // window to 0.
  //
  // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is
  // because merging those updates isn't currently safe. See
  // https://github.com/envoyproxy/envoy/pull/3941.
  google.protobuf.Duration update_merge_window = 4;

  // If set to true, Envoy will not consider new hosts when computing load balancing weights until
  // they have been health checked for the first time. This will have no effect unless
  // active health checking is also configured.
  //
  // Ignoring a host means that for any load balancing calculations that adjust weights based
  // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and
  // panic mode) Envoy will exclude these hosts in the denominator.
  //
  // For example, with hosts in two priorities P0 and P1, where P0 looks like
  // {healthy, unhealthy (new), unhealthy (new)}
  // and where P1 looks like
  // {healthy, healthy}
  // all traffic will still hit P0, as 1 / (3 - 2) = 1.
  //
  // Enabling this will allow scaling up the number of hosts for a given cluster without entering
  // panic mode or triggering priority spillover, assuming the hosts pass the first health check.
  //
  // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not
  // contribute to the calculation when deciding whether panic mode is enabled or not.
  bool ignore_new_hosts_until_first_hc = 5;

  // If set to `true`, the cluster manager will drain all existing
  // connections to upstream hosts whenever hosts are added or removed from the cluster.
  bool close_connections_on_host_set_change = 6;

  // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
  ConsistentHashingLbConfig consistent_hashing_lb_config = 7;
}

message RefreshRate {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Cluster.RefreshRate";

  // Specifies the base interval between refreshes. This parameter is required and must be greater
  // than zero and less than
  // :ref:`max_interval `.
  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {
    required: true
    gt {nanos: 1000000}
  }];

  // Specifies the maximum interval between refreshes. This parameter is optional, but must be
  // greater than or equal to the
  // :ref:`base_interval ` if set. The default
  // is 10 times the :ref:`base_interval `.
  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];
}

// [#not-implemented-hide:]
message PrefetchPolicy {
  // Indicates how many streams (rounded up) can be anticipated per-upstream for each
  // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching
  // will only be done if the upstream is healthy.
  //
  // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be
  // established, one for the new incoming stream, and one for a presumed follow-up stream. For
  // HTTP/2, only one connection would be established by default as one connection can
  // serve both the original and presumed follow-up stream.
  //
  // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100
  // active streams, there would be 100 connections in use, and 50 connections prefetched.
  // This might be a useful value for something like short lived single-use connections,
  // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection
  // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP
  // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more
  // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue
  // in case of unexpected disconnects where the connection could not be reused.
  //
  // If this value is not set, or set explicitly to one, Envoy will fetch as many connections
  // as needed to serve streams in flight. This means in steady state if a connection is torn down,
  // a subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be
  // prefetched.
  //
  // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can
  // harm latency more than the prefetching helps.
  google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1
      [(validate.rules).double = {lte: 3.0 gte: 1.0}];

  // Indicates how many streams (rounded up) can be anticipated across a cluster for each
  // stream, useful for low QPS services. This is currently supported for a subset of
  // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).
  // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a
  // cluster, doing best effort predictions of what upstream would be picked next and
  // pre-establishing a connection.
  //
  // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first
  // incoming stream, 2 connections will be prefetched - one to the first upstream for this
  // cluster, one to the second on the assumption there will be a follow-up stream.
  //
  // Prefetching will be limited to one prefetch per configured upstream in the cluster.
  //
  // If this value is not set, or set explicitly to one, Envoy will fetch as many connections
  // as needed to serve streams in flight, so during warm up and in steady state if a connection
  // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for
  // connection establishment.
  //
  // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met,
  // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream.
  // TODO(alyssawilk) per LB docs and LB overview docs when unhiding.
  google.protobuf.DoubleValue predictive_prefetch_ratio = 2
      [(validate.rules).double = {lte: 3.0 gte: 1.0}];
}

reserved 12, 15, 7, 11, 35;

reserved "hosts", "tls_context", "extension_protocol_options";

// Configuration to use different transport sockets for different endpoints.
// The entry of *envoy.transport_socket_match* in the
// :ref:`LbEndpoint.Metadata `
// is used to match against the transport sockets as they appear in the list. The first
// :ref:`match ` is used.
// For example, with the following match
//
// .. code-block:: yaml
//
//  transport_socket_matches:
//  - name: "enableMTLS"
//    match:
//      acceptMTLS: true
//    transport_socket:
//      name: envoy.transport_sockets.tls
//      config: { ... } # tls socket configuration
//  - name: "defaultToPlaintext"
//    match: {}
//    transport_socket:
//      name: envoy.transport_sockets.raw_buffer
//
// Connections to the endpoints whose metadata value under *envoy.transport_socket_match*
// having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration.
//
// If a :ref:`socket match ` with empty match
// criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext"
// socket match in case above.
//
// If an endpoint metadata's value under *envoy.transport_socket_match* does not match any
// *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or
// *transport_socket* specified in this cluster.
//
// This field allows gradual and flexible transport socket configuration changes.
//
// The metadata of endpoints in EDS can indicate transport socket capabilities. For example,
// an endpoint's metadata can have two key value pairs as "acceptMTLS": "true",
// "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic
// has "acceptPlaintext": "true" metadata information.
//
// Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS
// traffic for endpoints with "acceptMTLS": "true", by adding a corresponding
// *TransportSocketMatch* in this field. Other client Envoys receive CDS without
// *transport_socket_match* set, and still send plain text traffic to the same cluster.
//
// This field can be used to specify custom transport socket configurations for health
// checks by adding matching key/value pairs in a health check's
// :ref:`transport socket match criteria ` field.
//
// [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]
repeated TransportSocketMatch transport_socket_matches = 43;

// Supplies the name of the cluster which must be unique across all clusters.
// The cluster name is used when emitting
// :ref:`statistics ` if :ref:`alt_stat_name
// ` is not provided.
// Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.
string name = 1 [(validate.rules).string = {min_len: 1}];

// An optional alternative to the cluster name to be used while emitting stats.
// Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be
// confused with :ref:`Router Filter Header
// `.
string alt_stat_name = 28;

oneof cluster_discovery_type {
  // The :ref:`service discovery type `
  // to use for resolving the cluster.
  DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];

  // The custom cluster type.
  CustomClusterType cluster_type = 38;
}

// Configuration to use for EDS updates for the Cluster.
EdsClusterConfig eds_cluster_config = 3;

// The timeout for new network connections to hosts in the cluster.
google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];

// Soft limit on size of the cluster's connections read and write buffers. If
// unspecified, an implementation defined default is applied (1MiB).
google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5
    [(udpa.annotations.security).configure_for_untrusted_upstream = true];

// The :ref:`load balancer type ` to use
// when picking a host in the cluster.
// [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.]
LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}];

// Setting this is required for specifying members of
// :ref:`STATIC`,
// :ref:`STRICT_DNS`
// or :ref:`LOGICAL_DNS` clusters.
// This field supersedes the *hosts* field in the v2 API.
//
// .. attention::
//
//   Setting this allows non-EDS cluster types to contain embedded EDS equivalent
//   :ref:`endpoint assignments`.
//
endpoint.v3.ClusterLoadAssignment load_assignment = 33;

// Optional :ref:`active health checking `
// configuration for the cluster. If no
// configuration is specified no health checking will be done and all cluster
// members will be considered healthy at all times.
repeated core.v3.HealthCheck health_checks = 8;

// Optional maximum requests for a single upstream connection. This parameter
// is respected by both the HTTP/1.1 and HTTP/2 connection pool
// implementations. If not specified, there is no limit. Setting this
// parameter to 1 will effectively disable keep alive.
google.protobuf.UInt32Value max_requests_per_connection = 9;

// Optional :ref:`circuit breaking ` for the cluster.
CircuitBreakers circuit_breakers = 10;

// HTTP protocol options that are applied only to upstream HTTP connections.
// These options apply to all HTTP versions.
core.v3.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46;

// Additional options when handling HTTP requests upstream. These options will be applicable to
// both HTTP1 and HTTP2 requests.
core.v3.HttpProtocolOptions common_http_protocol_options = 29;

// Additional options when handling HTTP1 requests.
core.v3.Http1ProtocolOptions http_protocol_options = 13;

// Even if default HTTP2 protocol options are desired, this field must be
// set so that Envoy will assume that the upstream supports HTTP/2 when
// making new HTTP connection pool connections. Currently, Envoy only
// supports prior knowledge for upstream connections. Even if TLS is used
// with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2
// connections to happen over plain text.
core.v3.Http2ProtocolOptions http2_protocol_options = 14
    [(udpa.annotations.security).configure_for_untrusted_upstream = true];

// The extension_protocol_options field is used to provide extension-specific protocol options
// for upstream connections. The key should match the extension filter name, such as
// "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
// specific options.
// NOTE(review): the map's type parameters were missing in the source (invalid proto);
// restored to map<string, google.protobuf.Any> to match the v3 typed options convention —
// confirm against upstream.
map<string, google.protobuf.Any> typed_extension_protocol_options = 36;

// If the DNS refresh rate is specified and the cluster type is either
// :ref:`STRICT_DNS`,
// or :ref:`LOGICAL_DNS`,
// this value is used as the cluster's DNS refresh
// rate. The value configured must be at least 1ms. If this setting is not specified, the
// value defaults to 5000ms. For cluster types other than
// :ref:`STRICT_DNS`
// and :ref:`LOGICAL_DNS`
// this setting is ignored.
google.protobuf.Duration dns_refresh_rate = 16
    [(validate.rules).duration = {gt {nanos: 1000000}}];

// If the DNS failure refresh rate is specified and the cluster type is either
// :ref:`STRICT_DNS`,
// or :ref:`LOGICAL_DNS`,
// this is used as the cluster's DNS refresh rate when requests are failing. If this setting is
// not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types
// other than :ref:`STRICT_DNS` and
// :ref:`LOGICAL_DNS` this setting is
// ignored.
RefreshRate dns_failure_refresh_rate = 44;

// Optional configuration for setting cluster's DNS refresh rate. If the value is set to true,
// cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS
// resolution.
bool respect_dns_ttl = 39;

// The DNS IP address resolution policy. If this setting is not specified, the
// value defaults to
// :ref:`AUTO`.
DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];

// If DNS resolvers are specified and the cluster type is either
// :ref:`STRICT_DNS`,
// or :ref:`LOGICAL_DNS`,
// this value is used to specify the cluster's dns resolvers.
// If this setting is not specified, the value defaults to the default
// resolver, which uses /etc/resolv.conf for configuration. For cluster types
// other than
// :ref:`STRICT_DNS`
// and :ref:`LOGICAL_DNS`
// this setting is ignored.
// Setting this value causes failure if the
// ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
// server startup. Apple's API only allows overriding DNS resolvers via system settings.
repeated core.v3.Address dns_resolvers = 18;

// [#next-major-version: Reconcile DNS options in a single message.]
// Always use TCP queries instead of UDP queries for DNS lookups.
// Setting this value causes failure if the
// ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
// server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 45;

// If specified, outlier detection will be enabled for this upstream cluster.
// Each of the configuration values can be overridden via
// :ref:`runtime values `.
OutlierDetection outlier_detection = 19;

// The interval for removing stale hosts from a cluster type
// :ref:`ORIGINAL_DST`.
// Hosts are considered stale if they have not been used
// as upstream destinations during this interval. New hosts are added
// to original destination clusters on demand as new connections are
// redirected to Envoy, causing the number of hosts in the cluster to
// grow over time. Hosts that are not stale (they are actively used as
// destinations) are kept in the cluster, which allows connections to
// them remain open, saving the latency that would otherwise be spent
// on opening new connections. If this setting is not specified, the
// value defaults to 5000ms. For cluster types other than
// :ref:`ORIGINAL_DST`
// this setting is ignored.
google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];

// Optional configuration used to bind newly established upstream connections.
// This overrides any bind_config specified in the bootstrap proto.
// If the address and port are empty, no bind will be performed.
core.v3.BindConfig upstream_bind_config = 21;

// Configuration for load balancing subsetting.
LbSubsetConfig lb_subset_config = 22;

// Optional configuration for the load balancing algorithm selected by
// LbPolicy. Currently only
// :ref:`RING_HASH`,
// :ref:`MAGLEV` and
// :ref:`LEAST_REQUEST`
// has additional configuration options.
// Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting
// the corresponding LbPolicy will generate an error at runtime.
oneof lb_config {
  // Optional configuration for the Ring Hash load balancing policy.
  RingHashLbConfig ring_hash_lb_config = 23;

  // Optional configuration for the Maglev load balancing policy.
  MaglevLbConfig maglev_lb_config = 52;

  // Optional configuration for the Original Destination load balancing policy.
  OriginalDstLbConfig original_dst_lb_config = 34;

  // Optional configuration for the LeastRequest load balancing policy.
  LeastRequestLbConfig least_request_lb_config = 37;
}

// Common configuration for all load balancer implementations.
CommonLbConfig common_lb_config = 27;

// Optional custom transport socket implementation to use for upstream connections.
// To setup TLS, set a transport socket with name `tls` and
// :ref:`UpstreamTlsContexts ` in the `typed_config`.
// If no transport socket configuration is specified, new connections
// will be set up with plaintext.
core.v3.TransportSocket transport_socket = 24;

// The Metadata field can be used to provide additional information about the
// cluster. It can be used for stats, logging, and varying filter behavior.
// Fields should use reverse DNS notation to denote which entity within Envoy
// will need the information. For instance, if the metadata is intended for
// the Router filter, the filter name should be specified as *envoy.filters.http.router*.
core.v3.Metadata metadata = 25;

// Determines how Envoy selects the protocol used to speak to upstream hosts.
ClusterProtocolSelection protocol_selection = 26;

// Optional options for upstream connections.
UpstreamConnectionOptions upstream_connection_options = 30;

// If an upstream host becomes unhealthy (as determined by the configured health checks
// or outlier detection), immediately close all connections to the failed host.
//
// .. note::
//
//   This is currently only supported for connections created by tcp_proxy.
//
// .. note::
//
//   The current implementation of this feature closes all connections immediately when
//   the unhealthy status is detected. If there are a large number of connections open
//   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of
//   time exclusively closing these connections, and not processing any other traffic.
bool close_connections_on_host_health_failure = 31;

// If set to true, Envoy will ignore the health value of a host when processing its removal
// from service discovery. This means that if active health checking is used, Envoy will *not*
// wait for the endpoint to go unhealthy before removing it.
bool ignore_health_on_host_removal = 32;

// An (optional) network filter chain, listed in the order the filters should be applied.
// The chain will be applied to all outgoing connections that Envoy makes to the upstream
// servers of this cluster.
repeated Filter filters = 40;

// [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the
// :ref:`lb_policy` field has the value
// :ref:`LOAD_BALANCING_POLICY_CONFIG`.
LoadBalancingPolicy load_balancing_policy = 41;

// [#not-implemented-hide:]
// If present, tells the client where to send load reports via LRS. If not present, the
// client will fall back to a client-side default, which may be either (a) don't send any
// load reports or (b) send load reports for all clusters to a single default server
// (which may be configured in the bootstrap file).
//
// Note that if multiple clusters point to the same LRS server, the client may choose to
// create a separate stream for each cluster or it may choose to coalesce the data for
// multiple clusters onto a single stream. Either way, the client must make sure to send
// the data for any given cluster on no more than one stream.
//
// [#next-major-version: In the v3 API, we should consider restructuring this somehow,
// maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation
// from the LRS stream here.]
core.v3.ConfigSource lrs_server = 42;

// If track_timeout_budgets is true, the :ref:`timeout budget histograms
// ` will be published for each
// request. These show what percentage of a request's per try and global timeout was used. A value
// of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value
// of 100 would indicate that the request took the entirety of the timeout given to it.
//
// .. attention::
//
//   This field has been deprecated in favor of `timeout_budgets`, part of
//   :ref:`track_cluster_stats `.
bool track_timeout_budgets = 47 [deprecated = true];

// Optional customization and configuration of upstream connection pool, and upstream type.
//
// Currently this field only applies for HTTP traffic but is designed for eventual use for custom
// TCP upstreams.
// // For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream // HTTP, using the http connection pool and the codec from `http2_protocol_options` // // For routes where CONNECT termination is configured, Envoy will take downstream CONNECT // requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool. // // The default pool used is the generic connection pool which creates the HTTP upstream for most // HTTP requests, and the TCP upstream if CONNECT termination is configured. // // If users desire custom connection pool or upstream behavior, for example terminating // CONNECT only if a custom filter indicates it is appropriate, the custom factories // can be registered and configured here. core.v3.TypedExtensionConfig upstream_config = 48; // Configuration to track optional cluster stats. TrackClusterStats track_cluster_stats = 49; // [#not-implemented-hide:] // Prefetch configuration for this cluster. PrefetchPolicy prefetch_policy = 50; // If `connection_pool_per_downstream_connection` is true, the cluster will use a separate // connection pool for every downstream connection bool connection_pool_per_downstream_connection = 51; } // [#not-implemented-hide:] Extensible load balancing policy configuration. // // Every LB policy defined via this mechanism will be identified via a unique name using reverse // DNS notation. If the policy needs configuration parameters, it must define a message for its // own configuration, which will be stored in the config field. The name of the policy will tell // clients which type of message they should expect to see in the config field. // // Note that there are cases where it is useful to be able to independently select LB policies // for choosing a locality and for choosing an endpoint within that locality. 
// For example, a
// given deployment may always use the same policy to choose the locality, but for choosing the
// endpoint within the locality, some clusters may use weighted-round-robin, while others may
// use some sort of session-based balancing.
//
// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a
// child LB policy for each locality. For each request, the parent chooses the locality and then
// delegates to the child policy for that locality to choose the endpoint within the locality.
//
// To facilitate this, the config message for the top-level LB policy may include a field of
// type LoadBalancingPolicy that specifies the child policy.
message LoadBalancingPolicy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LoadBalancingPolicy";

  message Policy {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.LoadBalancingPolicy.Policy";

    reserved 2;

    reserved "config";

    // Required. The name of the LB policy.
    string name = 1;

    // The policy-specific configuration, packed into a typed Any. This is the
    // typed replacement for the reserved `config` field (tag 2) above.
    google.protobuf.Any typed_config = 3;
  }

  // Each client will iterate over the list in order and stop at the first policy that it
  // supports. This provides a mechanism for starting to use new LB policies that are not yet
  // supported by all clients.
  repeated Policy policies = 1;
}

// An extensible structure containing the address Envoy should bind to when
// establishing upstream connections.
message UpstreamBindConfig {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.UpstreamBindConfig";

  // The address Envoy should bind to when establishing upstream connections.
  core.v3.Address source_address = 1;
}

message UpstreamConnectionOptions {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.UpstreamConnectionOptions";

  // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives.
  core.v3.TcpKeepalive tcp_keepalive = 1;
}

message TrackClusterStats {
  // If timeout_budgets is true, the :ref:`timeout budget histograms
  // ` will be published for each
  // request. These show what percentage of a request's per try and global timeout was used. A value
  // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value
  // of 100 would indicate that the request took the entirety of the timeout given to it.
  bool timeout_budgets = 1;

  // If request_response_sizes is true, then the :ref:`histograms
  // ` tracking header and body sizes
  // of requests and responses will be published.
  bool request_response_sizes = 2;
}

================================================
FILE: api/envoy/config/cluster/v3/filter.proto
================================================
syntax = "proto3";

package envoy.config.cluster.v3;

import "google/protobuf/any.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.cluster.v3";
option java_outer_classname = "FilterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Upstream filters]
// Upstream filters apply to the connections to the upstream cluster hosts.

message Filter {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.cluster.Filter";

  // The name of the filter to instantiate. The name must match a
  // :ref:`supported filter `.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // Filter specific configuration which depends on the filter being
  // instantiated. See the supported filters for further documentation.
google.protobuf.Any typed_config = 2;
}

================================================
FILE: api/envoy/config/cluster/v3/outlier_detection.proto
================================================
syntax = "proto3";

package envoy.config.cluster.v3;

import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.cluster.v3";
option java_outer_classname = "OutlierDetectionProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Outlier detection]

// See the :ref:`architecture overview ` for
// more information on outlier detection.
// [#next-free-field: 21]
message OutlierDetection {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.cluster.OutlierDetection";

  // The number of consecutive 5xx responses or local origin errors that are mapped
  // to 5xx error codes before a consecutive 5xx ejection
  // occurs. Defaults to 5.
  google.protobuf.UInt32Value consecutive_5xx = 1;

  // The time interval between ejection analysis sweeps. This can result in
  // both new ejections as well as hosts being returned to service. Defaults
  // to 10000ms or 10s.
  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];

  // The base time that a host is ejected for. The real time is equal to the
  // base time multiplied by the number of times the host has been ejected.
  // Defaults to 30000ms or 30s.
  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];

  // The maximum % of an upstream cluster that can be ejected due to outlier
  // detection. Defaults to 10% but will eject at least one host regardless of the value.
  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive 5xx. This setting can be used to disable
  // ejection or to ramp it up slowly. Defaults to 100.
  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through success rate statistics. This setting can be used to
  // disable ejection or to ramp it up slowly. Defaults to 100.
  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];

  // The number of hosts in a cluster that must have enough request volume to
  // detect success rate outliers. If the number of hosts is less than this
  // setting, outlier detection via success rate statistics is not performed
  // for any host in the cluster. Defaults to 5.
  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;

  // The minimum number of total requests that must be collected in one
  // interval (as defined by the interval duration above) to include this host
  // in success rate based outlier detection. If the volume is lower than this
  // setting, outlier detection via success rate statistics is not performed
  // for that host. Defaults to 100.
  google.protobuf.UInt32Value success_rate_request_volume = 8;

  // This factor is used to determine the ejection threshold for success rate
  // outlier ejection. The ejection threshold is the difference between the
  // mean success rate, and the product of this factor and the standard
  // deviation of the mean success rate: mean - (stdev *
  // success_rate_stdev_factor). This factor is divided by a thousand to get a
  // double. That is, if the desired factor is 1.9, the runtime value should
  // be 1900. Defaults to 1900.
  google.protobuf.UInt32Value success_rate_stdev_factor = 9;

  // The number of consecutive gateway failures (502, 503, 504 status codes)
  // before a consecutive gateway failure ejection occurs. Defaults to 5.
  google.protobuf.UInt32Value consecutive_gateway_failure = 10;

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive gateway failures. This setting can be
  // used to disable ejection or to ramp it up slowly. Defaults to 0.
  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11
      [(validate.rules).uint32 = {lte: 100}];

  // Determines whether to distinguish local origin failures from external errors. If set to true
  // the following configuration parameters are taken into account:
  // :ref:`consecutive_local_origin_failure`,
  // :ref:`enforcing_consecutive_local_origin_failure`
  // and
  // :ref:`enforcing_local_origin_success_rate`.
  // Defaults to false.
  bool split_external_local_origin_errors = 12;

  // The number of consecutive locally originated failures before ejection
  // occurs. Defaults to 5. Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive locally originated failures. This setting can be
  // used to disable ejection or to ramp it up slowly. Defaults to 100.
  // Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14
      [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through success rate statistics for locally originated errors.
  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.
  // Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15
      [(validate.rules).uint32 = {lte: 100}];

  // The failure percentage to use when determining failure percentage-based outlier detection. If
  // the failure percentage of a given host is greater than or equal to this value, it will be
  // ejected. Defaults to 85.
  google.protobuf.UInt32Value failure_percentage_threshold = 16 [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status is detected through
  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up
  // slowly. Defaults to 0.
  //
  // [#next-major-version: setting this without setting failure_percentage_threshold should be
  // invalid in v4.]
  google.protobuf.UInt32Value enforcing_failure_percentage = 17 [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status is detected through
  // local-origin failure percentage statistics. This setting can be used to disable ejection or to
  // ramp it up slowly. Defaults to 0.
  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18
      [(validate.rules).uint32 = {lte: 100}];

  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.
  // If the total number of hosts in the cluster is less than this value, failure percentage-based
  // ejection will not be performed. Defaults to 5.
  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;

  // The minimum number of total requests that must be collected in one interval (as defined by the
  // interval duration above) to perform failure percentage-based ejection for this host. If the
  // volume is lower than this setting, failure percentage-based ejection will not be performed for
  // this host. Defaults to 50.
  google.protobuf.UInt32Value failure_percentage_request_volume = 20;
}

================================================
FILE: api/envoy/config/cluster/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/config/cluster/v3:pkg",
        "//envoy/config/core/v4alpha:pkg",
        "//envoy/config/endpoint/v3:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
        "@com_github_cncf_udpa//udpa/core/v1:pkg",
    ],
)

================================================
FILE: api/envoy/config/cluster/v4alpha/circuit_breaker.proto
================================================
syntax = "proto3";

package envoy.config.cluster.v4alpha;

import "envoy/config/core/v4alpha/base.proto";
import "envoy/type/v3/percent.proto";

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha";
option java_outer_classname = "CircuitBreakerProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Circuit breakers]

// :ref:`Circuit breaking` settings can be
// specified individually for each defined priority.
message CircuitBreakers {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.CircuitBreakers";

  // A Thresholds defines CircuitBreaker settings for a
  // :ref:`RoutingPriority`.
// [#next-free-field: 9]
  message Thresholds {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.cluster.v3.CircuitBreakers.Thresholds";

    message RetryBudget {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget";

      // Specifies the limit on concurrent retries as a percentage of the sum of active requests and
      // active pending requests. For example, if there are 100 active requests and the
      // budget_percent is set to 25, there may be 25 active retries.
      //
      // This parameter is optional. Defaults to 20%.
      type.v3.Percent budget_percent = 1;

      // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the
      // number of active retries may never go below this number.
      //
      // This parameter is optional. Defaults to 3.
      google.protobuf.UInt32Value min_retry_concurrency = 2;
    }

    // The :ref:`RoutingPriority`
    // the specified CircuitBreaker settings apply to.
    core.v4alpha.RoutingPriority priority = 1 [(validate.rules).enum = {defined_only: true}];

    // The maximum number of connections that Envoy will make to the upstream
    // cluster. If not specified, the default is 1024.
    google.protobuf.UInt32Value max_connections = 2;

    // The maximum number of pending requests that Envoy will allow to the
    // upstream cluster. If not specified, the default is 1024.
    google.protobuf.UInt32Value max_pending_requests = 3;

    // The maximum number of parallel requests that Envoy will make to the
    // upstream cluster. If not specified, the default is 1024.
    google.protobuf.UInt32Value max_requests = 4;

    // The maximum number of parallel retries that Envoy will allow to the
    // upstream cluster. If not specified, the default is 3.
    google.protobuf.UInt32Value max_retries = 5;

    // Specifies a limit on concurrent retries in relation to the number of active requests. This
    // parameter is optional.
    //
    // .. note::
    //
    //   If this field is set, the retry budget will override any configured retry circuit
    //   breaker.
    RetryBudget retry_budget = 8;

    // If track_remaining is true, then stats will be published that expose
    // the number of resources remaining until the circuit breakers open. If
    // not specified, the default is false.
    //
    // .. note::
    //
    //   If a retry budget is used in lieu of the max_retries circuit breaker,
    //   the remaining retry resources will not be tracked.
    bool track_remaining = 6;

    // The maximum number of connection pools per cluster that Envoy will concurrently support at
    // once. If not specified, the default is unlimited. Set this for clusters which create a
    // large number of connection pools. See
    // :ref:`Circuit Breaking ` for
    // more details.
    google.protobuf.UInt32Value max_connection_pools = 7;
  }

  // If multiple :ref:`Thresholds`
  // are defined with the same :ref:`RoutingPriority`,
  // the first one in the list is used. If no Thresholds is defined for a given
  // :ref:`RoutingPriority`, the default values
  // are used.
  repeated Thresholds thresholds = 1;
}

================================================
FILE: api/envoy/config/cluster/v4alpha/cluster.proto
================================================
syntax = "proto3";

package envoy.config.cluster.v4alpha;

import "envoy/config/cluster/v4alpha/circuit_breaker.proto";
import "envoy/config/cluster/v4alpha/filter.proto";
import "envoy/config/cluster/v4alpha/outlier_detection.proto";
import "envoy/config/core/v4alpha/address.proto";
import "envoy/config/core/v4alpha/base.proto";
import "envoy/config/core/v4alpha/config_source.proto";
import "envoy/config/core/v4alpha/extension.proto";
import "envoy/config/core/v4alpha/health_check.proto";
import "envoy/config/core/v4alpha/protocol.proto";
import "envoy/config/endpoint/v3/endpoint.proto";
import "envoy/type/v3/percent.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "udpa/core/v1/collection_entry.proto";
import "udpa/core/v1/resource_locator.proto";

import "envoy/annotations/deprecation.proto";
import "udpa/annotations/security.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha";
option java_outer_classname = "ClusterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Cluster configuration]

// Cluster list collections. Entries are *Cluster* resources or references.
// [#not-implemented-hide:]
message ClusterCollection {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.ClusterCollection";

  udpa.core.v1.CollectionEntry entries = 1;
}

// Configuration for a single upstream cluster.
// [#next-free-field: 53] message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster"; // Refer to :ref:`service discovery type ` // for an explanation on each type. enum DiscoveryType { // Refer to the :ref:`static discovery type` // for an explanation. STATIC = 0; // Refer to the :ref:`strict DNS discovery // type` // for an explanation. STRICT_DNS = 1; // Refer to the :ref:`logical DNS discovery // type` // for an explanation. LOGICAL_DNS = 2; // Refer to the :ref:`service discovery type` // for an explanation. EDS = 3; // Refer to the :ref:`original destination discovery // type` // for an explanation. ORIGINAL_DST = 4; } // Refer to :ref:`load balancer type ` architecture // overview section for information on each type. enum LbPolicy { reserved 4; reserved "ORIGINAL_DST_LB"; // Refer to the :ref:`round robin load balancing // policy` // for an explanation. ROUND_ROBIN = 0; // Refer to the :ref:`least request load balancing // policy` // for an explanation. LEAST_REQUEST = 1; // Refer to the :ref:`ring hash load balancing // policy` // for an explanation. RING_HASH = 2; // Refer to the :ref:`random load balancing // policy` // for an explanation. RANDOM = 3; // Refer to the :ref:`Maglev load balancing policy` // for an explanation. MAGLEV = 5; // This load balancer type must be specified if the configured cluster provides a cluster // specific load balancer. Consult the configured cluster's documentation for whether to set // this option or not. CLUSTER_PROVIDED = 6; // [#not-implemented-hide:] Use the new :ref:`load_balancing_policy // ` field to determine the LB policy. // [#next-major-version: In the v3 API, we should consider deprecating the lb_policy field // and instead using the new load_balancing_policy field as the one and only mechanism for // configuring this.] 
LOAD_BALANCING_POLICY_CONFIG = 7; } // When V4_ONLY is selected, the DNS resolver will only perform a lookup for // addresses in the IPv4 family. If V6_ONLY is selected, the DNS resolver will // only perform a lookup for addresses in the IPv6 family. If AUTO is // specified, the DNS resolver will first perform a lookup for addresses in // the IPv6 family and fallback to a lookup for addresses in the IPv4 family. // For cluster types other than // :ref:`STRICT_DNS` and // :ref:`LOGICAL_DNS`, // this setting is // ignored. enum DnsLookupFamily { AUTO = 0; V4_ONLY = 1; V6_ONLY = 2; } enum ClusterProtocolSelection { // Cluster can only operate on one of the possible upstream protocols (HTTP1.1, HTTP2). // If :ref:`http2_protocol_options ` are // present, HTTP2 will be used, otherwise HTTP1.1 will be used. USE_CONFIGURED_PROTOCOL = 0; // Use HTTP1.1 or HTTP2, depending on which one is used on the downstream connection. USE_DOWNSTREAM_PROTOCOL = 1; } // TransportSocketMatch specifies what transport socket config will be used // when the match conditions are satisfied. message TransportSocketMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster.TransportSocketMatch"; // The name of the match, used in stats generation. string name = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria. // The connection to the endpoint with metadata matching what is set in this field // will use the transport socket configuration specified here. // The endpoint's metadata entry in *envoy.transport_socket_match* is used to match // against the values specified in this field. google.protobuf.Struct match = 2; // The configuration of the transport socket. core.v4alpha.TransportSocket transport_socket = 3; } // Extended cluster type. 
message CustomClusterType { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster.CustomClusterType"; // The type of the cluster to instantiate. The name must match a supported cluster type. string name = 1 [(validate.rules).string = {min_len: 1}]; // Cluster specific configuration which depends on the cluster being instantiated. // See the supported cluster for further documentation. google.protobuf.Any typed_config = 2; } // Only valid when discovery type is EDS. message EdsClusterConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster.EdsClusterConfig"; // Configuration for the source of EDS updates for this Cluster. core.v4alpha.ConfigSource eds_config = 1; oneof name_specifier { // Optional alternative to cluster name to present to EDS. This does not // have the same restrictions as cluster name, i.e. it may be arbitrary // length. string service_name = 2; // Resource locator for EDS. This is mutually exclusive to *service_name*. // [#not-implemented-hide:] udpa.core.v1.ResourceLocator eds_resource_locator = 3; } } // Optionally divide the endpoints in this cluster into subsets defined by // endpoint metadata and selected by route and weighted cluster metadata. // [#next-free-field: 8] message LbSubsetConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Cluster.LbSubsetConfig"; // If NO_FALLBACK is selected, a result // equivalent to no healthy hosts is reported. If ANY_ENDPOINT is selected, // any cluster endpoint may be returned (subject to policy, health checks, // etc). If DEFAULT_SUBSET is selected, load balancing is performed over the // endpoints matching the values from the default_subset field. enum LbSubsetFallbackPolicy { NO_FALLBACK = 0; ANY_ENDPOINT = 1; DEFAULT_SUBSET = 2; } // Specifications for subsets. 
message LbSubsetSelector {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.Cluster.LbSubsetConfig.LbSubsetSelector";

  // Allows to override top level fallback policy per selector.
  enum LbSubsetSelectorFallbackPolicy {
    // If NOT_DEFINED top level config fallback policy is used instead.
    NOT_DEFINED = 0;

    // If NO_FALLBACK is selected, a result equivalent to no healthy hosts is reported.
    NO_FALLBACK = 1;

    // If ANY_ENDPOINT is selected, any cluster endpoint may be returned
    // (subject to policy, health checks, etc).
    ANY_ENDPOINT = 2;

    // If DEFAULT_SUBSET is selected, load balancing is performed over the
    // endpoints matching the values from the default_subset field.
    DEFAULT_SUBSET = 3;

    // If KEYS_SUBSET is selected, subset selector matching is performed again with metadata
    // keys reduced to
    // :ref:`fallback_keys_subset`.
    // It allows for a fallback to a different, less specific selector if some of the keys of
    // the selector are considered optional.
    KEYS_SUBSET = 4;
  }

  // List of keys to match with the weighted cluster metadata.
  repeated string keys = 1;

  // Selects a mode of operation in which each subset has only one host. This mode uses the same rules for
  // choosing a host, but updating hosts is faster, especially for large numbers of hosts.
  //
  // If a match is found to a host, that host will be used regardless of priority levels, unless the host is unhealthy.
  //
  // Currently, this mode is only supported if `subset_selectors` has only one entry, and `keys` contains
  // only one entry.
  //
  // When this mode is enabled, configurations that contain more than one host with the same metadata value for the single key in `keys`
  // will use only one of the hosts with the given key; no requests will be routed to the others. The cluster gauge
  // :ref:`lb_subsets_single_host_per_subset_duplicate` indicates how many duplicates are
  // present in the current configuration.
  bool single_host_per_subset = 4;

  // The behavior used when no endpoint subset matches the selected route's
  // metadata.
  LbSubsetSelectorFallbackPolicy fallback_policy = 2 [(validate.rules).enum = {defined_only: true}];

  // Subset of
  // :ref:`keys` used by
  // :ref:`KEYS_SUBSET`
  // fallback policy.
  // It has to be a non-empty list if KEYS_SUBSET fallback policy is selected.
  // For any other fallback policy the parameter is not used and should not be set.
  // Only values also present in
  // :ref:`keys` are allowed, but
  // `fallback_keys_subset` cannot be equal to `keys`.
  repeated string fallback_keys_subset = 3;
}

// The behavior used when no endpoint subset matches the selected route's
// metadata. The value defaults to
// :ref:`NO_FALLBACK`.
LbSubsetFallbackPolicy fallback_policy = 1 [(validate.rules).enum = {defined_only: true}];

// Specifies the default subset of endpoints used during fallback if
// fallback_policy is
// :ref:`DEFAULT_SUBSET`.
// Each field in default_subset is
// compared to the matching LbEndpoint.Metadata under the *envoy.lb*
// namespace. It is valid for no hosts to match, in which case the behavior
// is the same as a fallback_policy of
// :ref:`NO_FALLBACK`.
google.protobuf.Struct default_subset = 2;

// For each entry, LbEndpoint.Metadata's
// *envoy.lb* namespace is traversed and a subset is created for each unique
// combination of key and value. For example:
//
// .. code-block:: json
//
//   { "subset_selectors": [
//       { "keys": [ "version" ] },
//       { "keys": [ "stage", "hardware_type" ] }
//   ]}
//
// A subset is matched when the metadata from the selected route and
// weighted cluster contains the same keys and values as the subset's
// metadata. The same host may appear in multiple subsets.
repeated LbSubsetSelector subset_selectors = 3;

// If true, routing to subsets will take into account the localities and locality weights of the
// endpoints when making the routing decision.
//
// There are some potential pitfalls associated with enabling this feature, as the resulting
// traffic split after applying both a subset match and locality weights might be undesirable.
//
// Consider for example a situation in which you have 50/50 split across two localities X/Y
// which have 100 hosts each without subsetting. If the subset LB results in X having only 1
// host selected but Y having 100, then a lot more load is being dumped on the single host in X
// than originally anticipated in the load balancing assignment delivered via EDS.
bool locality_weight_aware = 4;

// When used with locality_weight_aware, scales the weight of each locality by the ratio
// of hosts in the subset vs hosts in the original subset. This aims to even out the load
// going to an individual locality if said locality is disproportionately affected by the
// subset predicate.
bool scale_locality_weight = 5;

// If true, when a fallback policy is configured and its corresponding subset fails to find
// a host this will cause any host to be selected instead.
//
// This is useful when using the default subset as the fallback policy, given the default
// subset might become empty. With this option enabled, if that happens the LB will attempt
// to select a host from the entire cluster.
bool panic_mode_any = 6;

// If true, metadata specified for a metadata key will be matched against the corresponding
// endpoint metadata if the endpoint metadata matches the value exactly OR it is a list value
// and any of the elements in the list matches the criteria.
bool list_as_any = 7;
}

// Specific configuration for the LeastRequest load balancing policy.
message LeastRequestLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.Cluster.LeastRequestLbConfig";

  // The number of random healthy hosts from which the host with the fewest active requests will
  // be chosen. Defaults to 2 so that we perform two-choice selection if the field is not set.
  google.protobuf.UInt32Value choice_count = 1 [(validate.rules).uint32 = {gte: 2}];

  // The following formula is used to calculate the dynamic weights when hosts have different load
  // balancing weights:
  //
  // `weight = load_balancing_weight / (active_requests + 1)^active_request_bias`
  //
  // The larger the active request bias is, the more aggressively active requests will lower the
  // effective weight when all host weights are not equal.
  //
  // `active_request_bias` must be greater than or equal to 0.0.
  //
  // When `active_request_bias == 0.0` the Least Request Load Balancer doesn't consider the number
  // of active requests at the time it picks a host and behaves like the Round Robin Load
  // Balancer.
  //
  // When `active_request_bias > 0.0` the Least Request Load Balancer scales the load balancing
  // weight by the number of active requests at the time it does a pick.
  //
  // The value is cached for performance reasons and refreshed whenever one of the Load Balancer's
  // host sets changes, e.g., whenever there is a host membership update or a host load balancing
  // weight change.
  //
  // .. note::
  //   This setting only takes effect if all host weights are not equal.
  core.v4alpha.RuntimeDouble active_request_bias = 2;
}

// Specific configuration for the :ref:`RingHash`
// load balancing policy.
message RingHashLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.Cluster.RingHashLbConfig";

  // The hash function used to hash hosts onto the ketama ring.
  enum HashFunction {
    // Use `xxHash `_, this is the default hash function.
    XX_HASH = 0;

    // Use `MurmurHash2 `_, this is compatible with
    // std:hash in GNU libstdc++ 3.4.20 or above. This is typically the case when compiled
    // on Linux and not macOS.
    MURMUR_HASH_2 = 1;
  }

  reserved 2;

  // Minimum hash ring size. The larger the ring is (that is, the more hashes there are for each
  // provided host) the better the request distribution will reflect the desired weights. Defaults
  // to 1024 entries, and limited to 8M entries. See also
  // :ref:`maximum_ring_size`.
  google.protobuf.UInt64Value minimum_ring_size = 1 [(validate.rules).uint64 = {lte: 8388608}];

  // The hash function used to hash hosts onto the ketama ring. The value defaults to
  // :ref:`XX_HASH`.
  HashFunction hash_function = 3 [(validate.rules).enum = {defined_only: true}];

  // Maximum hash ring size. Defaults to 8M entries, and limited to 8M entries, but can be lowered
  // to further constrain resource use. See also
  // :ref:`minimum_ring_size`.
  google.protobuf.UInt64Value maximum_ring_size = 4 [(validate.rules).uint64 = {lte: 8388608}];
}

// Specific configuration for the :ref:`Maglev`
// load balancing policy.
message MaglevLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.Cluster.MaglevLbConfig";

  // The table size for Maglev hashing. The Maglev aims for ‘minimal disruption’ rather than an absolute guarantee.
  // Minimal disruption means that when the set of upstreams changes, a connection will likely be sent to the same
  // upstream as it was before. Increasing the table size reduces the amount of disruption.
  // The table size must be a prime number. If it is not specified, the default is 65537.
  google.protobuf.UInt64Value table_size = 1;
}

// Specific configuration for the
// :ref:`Original Destination `
// load balancing policy.
message OriginalDstLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.Cluster.OriginalDstLbConfig";

  // When true, :ref:`x-envoy-original-dst-host
  // ` can be used to override destination
  // address.
  //
  // .. attention::
  //
  //   This header isn't sanitized by default, so enabling this feature allows HTTP clients to
  //   route traffic to arbitrary hosts and/or ports, which may have serious security
  //   consequences.
  bool use_http_header = 1;
}

// Common configuration for all load balancer implementations.
// [#next-free-field: 8]
message CommonLbConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.Cluster.CommonLbConfig";

  // Configuration for :ref:`zone aware routing
  // `.
  message ZoneAwareLbConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.cluster.v3.Cluster.CommonLbConfig.ZoneAwareLbConfig";

    // Configures percentage of requests that will be considered for zone aware routing
    // if zone aware routing is configured. If not specified, the default is 100%.
    // * :ref:`runtime values `.
    // * :ref:`Zone aware routing support `.
    type.v3.Percent routing_enabled = 1;

    // Configures minimum upstream cluster size required for zone aware routing
    // If upstream cluster size is less than specified, zone aware routing is not performed
    // even if zone aware routing is configured. If not specified, the default is 6.
    // * :ref:`runtime values `.
    // * :ref:`Zone aware routing support `.
    google.protobuf.UInt64Value min_cluster_size = 2;

    // If set to true, Envoy will not consider any hosts when the cluster is in :ref:`panic
    // mode`. Instead, the cluster will fail all
    // requests as if all hosts are unhealthy. This can help avoid potentially overwhelming a
    // failing service.
    bool fail_traffic_on_panic = 3;
  }

  // Configuration for :ref:`locality weighted load balancing
  // `
  message LocalityWeightedLbConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.cluster.v3.Cluster.CommonLbConfig.LocalityWeightedLbConfig";
  }

  // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
  message ConsistentHashingLbConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.cluster.v3.Cluster.CommonLbConfig.ConsistentHashingLbConfig";

    // If set to `true`, the cluster will use hostname instead of the resolved
    // address as the key to consistently hash to an upstream host. Only valid for StrictDNS clusters with hostnames which resolve to a single IP address.
    bool use_hostname_for_hashing = 1;

    // Configures percentage of average cluster load to bound per upstream host. For example, with a value of 150
    // no upstream host will get a load more than 1.5 times the average load of all the hosts in the cluster.
    // If not specified, the load is not bounded for any upstream host. Typical value for this parameter is between 120 and 200.
    // Minimum is 100.
    //
    // Applies to both Ring Hash and Maglev load balancers.
    //
    // This is implemented based on the method described in the paper https://arxiv.org/abs/1608.01350. For the specified
    // `hash_balance_factor`, requests to any upstream host are capped at `hash_balance_factor/100` times the average number of requests
    // across the cluster. When a request arrives for an upstream host that is currently serving at its max capacity, linear probing
    // is used to identify an eligible host. Further, the linear probe is implemented using a random jump in hosts ring/table to identify
    // the eligible host (this technique is as described in the paper https://arxiv.org/abs/1908.08762 - the random jump avoids the
    // cascading overflow effect when choosing the next host in the ring/table).
    //
    // If weights are specified on the hosts, they are respected.
    //
    // This is an O(N) algorithm, unlike other load balancers. Using a lower `hash_balance_factor` results in more hosts
    // being probed, so use a higher value if you require better performance.
    google.protobuf.UInt32Value hash_balance_factor = 2 [(validate.rules).uint32 = {gte: 100}];
  }

  // Configures the :ref:`healthy panic threshold `.
  // If not specified, the default is 50%.
  // To disable panic mode, set to 0%.
  //
  // .. note::
  //   The specified percent will be truncated to the nearest 1%.
  type.v3.Percent healthy_panic_threshold = 1;

  oneof locality_config_specifier {
    ZoneAwareLbConfig zone_aware_lb_config = 2;

    LocalityWeightedLbConfig locality_weighted_lb_config = 3;
  }

  // If set, all health check/weight/metadata updates that happen within this duration will be
  // merged and delivered in one shot when the duration expires. The start of the duration is when
  // the first update happens. This is useful for big clusters, with potentially noisy deploys
  // that might trigger excessive CPU usage due to a constant stream of healthcheck state changes
  // or metadata updates. The first set of updates to be seen apply immediately (e.g.: a new
  // cluster). Please always keep in mind that the use of sandbox technologies may change this
  // behavior.
  //
  // If this is not set, we default to a merge window of 1000ms. To disable it, set the merge
  // window to 0.
  //
  // Note: merging does not apply to cluster membership changes (e.g.: adds/removes); this is
  // because merging those updates isn't currently safe. See
  // https://github.com/envoyproxy/envoy/pull/3941.
  google.protobuf.Duration update_merge_window = 4;

  // If set to true, Envoy will not consider new hosts when computing load balancing weights until
  // they have been health checked for the first time. This will have no effect unless
  // active health checking is also configured.
  //
  // Ignoring a host means that for any load balancing calculations that adjust weights based
  // on the ratio of eligible hosts and total hosts (priority spillover, locality weighting and
  // panic mode) Envoy will exclude these hosts in the denominator.
  //
  // For example, with hosts in two priorities P0 and P1, where P0 looks like
  // {healthy, unhealthy (new), unhealthy (new)}
  // and where P1 looks like
  // {healthy, healthy}
  // all traffic will still hit P0, as 1 / (3 - 2) = 1.
  //
  // Enabling this will allow scaling up the number of hosts for a given cluster without entering
  // panic mode or triggering priority spillover, assuming the hosts pass the first health check.
  //
  // If panic mode is triggered, new hosts are still eligible for traffic; they simply do not
  // contribute to the calculation when deciding whether panic mode is enabled or not.
  bool ignore_new_hosts_until_first_hc = 5;

  // If set to `true`, the cluster manager will drain all existing
  // connections to upstream hosts whenever hosts are added or removed from the cluster.
  bool close_connections_on_host_set_change = 6;

  // Common Configuration for all consistent hashing load balancers (MaglevLb, RingHashLb, etc.)
  ConsistentHashingLbConfig consistent_hashing_lb_config = 7;
}

message RefreshRate {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.Cluster.RefreshRate";

  // Specifies the base interval between refreshes. This parameter is required and must be greater
  // than zero and less than
  // :ref:`max_interval `.
  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {
    required: true
    gt {nanos: 1000000}
  }];

  // Specifies the maximum interval between refreshes. This parameter is optional, but must be
  // greater than or equal to the
  // :ref:`base_interval ` if set. The default
  // is 10 times the :ref:`base_interval `.
  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {nanos: 1000000}}];
}

// [#not-implemented-hide:]
message PrefetchPolicy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.Cluster.PrefetchPolicy";

  // Indicates how many streams (rounded up) can be anticipated per-upstream for each
  // incoming stream. This is useful for high-QPS or latency-sensitive services. Prefetching
  // will only be done if the upstream is healthy.
  //
  // For example if this is 2, for an incoming HTTP/1.1 stream, 2 connections will be
  // established, one for the new incoming stream, and one for a presumed follow-up stream. For
  // HTTP/2, only one connection would be established by default as one connection can
  // serve both the original and presumed follow-up stream.
  //
  // In steady state for non-multiplexed connections a value of 1.5 would mean if there were 100
  // active streams, there would be 100 connections in use, and 50 connections prefetched.
  // This might be a useful value for something like short lived single-use connections,
  // for example proxying HTTP/1.1 if keep-alive were false and each stream resulted in connection
  // termination. It would likely be overkill for long lived connections, such as TCP proxying SMTP
  // or regular HTTP/1.1 with keep-alive. For long lived traffic, a value of 1.05 would be more
  // reasonable, where for every 100 connections, 5 prefetched connections would be in the queue
  // in case of unexpected disconnects where the connection could not be reused.
  //
  // If this value is not set, or set explicitly to one, Envoy will fetch as many connections
  // as needed to serve streams in flight. This means in steady state if a connection is torn down,
  // subsequent streams will pay an upstream-rtt latency penalty waiting for streams to be
  // prefetched.
  //
  // This is limited somewhat arbitrarily to 3 because prefetching connections too aggressively can
  // harm latency more than the prefetching helps.
  google.protobuf.DoubleValue per_upstream_prefetch_ratio = 1
      [(validate.rules).double = {lte: 3.0 gte: 1.0}];

  // Indicates how many streams (rounded up) can be anticipated across a cluster for each
  // stream, useful for low QPS services. This is currently supported for a subset of
  // deterministic non-hash-based load-balancing algorithms (weighted round robin, random).
  // Unlike per_upstream_prefetch_ratio this prefetches across the upstream instances in a
  // cluster, doing best effort predictions of what upstream would be picked next and
  // pre-establishing a connection.
  //
  // For example if prefetching is set to 2 for a round robin HTTP/2 cluster, on the first
  // incoming stream, 2 connections will be prefetched - one to the first upstream for this
  // cluster, one to the second on the assumption there will be a follow-up stream.
  //
  // Prefetching will be limited to one prefetch per configured upstream in the cluster.
  //
  // If this value is not set, or set explicitly to one, Envoy will fetch as many connections
  // as needed to serve streams in flight, so during warm up and in steady state if a connection
  // is closed (and per_upstream_prefetch_ratio is not set), there will be a latency hit for
  // connection establishment.
  //
  // If both this and prefetch_ratio are set, Envoy will make sure both predicted needs are met,
  // basically prefetching max(predictive-prefetch, per-upstream-prefetch), for each upstream.
  // TODO(alyssawilk) per LB docs and LB overview docs when unhiding.
  google.protobuf.DoubleValue predictive_prefetch_ratio = 2
      [(validate.rules).double = {lte: 3.0 gte: 1.0}];
}

reserved 12, 15, 7, 11, 35, 47;

reserved "hosts", "tls_context", "extension_protocol_options", "track_timeout_budgets";

// Configuration to use different transport sockets for different endpoints.
// The entry of *envoy.transport_socket_match* in the
// :ref:`LbEndpoint.Metadata `
// is used to match against the transport sockets as they appear in the list. The first
// :ref:`match ` is used.
// For example, with the following match
//
// .. code-block:: yaml
//
//  transport_socket_matches:
//  - name: "enableMTLS"
//    match:
//      acceptMTLS: true
//    transport_socket:
//      name: envoy.transport_sockets.tls
//      config: { ... } # tls socket configuration
//  - name: "defaultToPlaintext"
//    match: {}
//    transport_socket:
//      name: envoy.transport_sockets.raw_buffer
//
// Connections to the endpoints whose metadata value under *envoy.transport_socket_match*
// having "acceptMTLS"/"true" key/value pair use the "enableMTLS" socket configuration.
//
// If a :ref:`socket match ` with empty match
// criteria is provided, that always match any endpoint. For example, the "defaultToPlaintext"
// socket match in case above.
//
// If an endpoint metadata's value under *envoy.transport_socket_match* does not match any
// *TransportSocketMatch*, socket configuration fallbacks to use the *tls_context* or
// *transport_socket* specified in this cluster.
//
// This field allows gradual and flexible transport socket configuration changes.
//
// The metadata of endpoints in EDS can indicate transport socket capabilities. For example,
// an endpoint's metadata can have two key value pairs as "acceptMTLS": "true",
// "acceptPlaintext": "true". While some other endpoints, only accepting plaintext traffic
// has "acceptPlaintext": "true" metadata information.
//
// Then the xDS server can configure the CDS to a client, Envoy A, to send mutual TLS
// traffic for endpoints with "acceptMTLS": "true", by adding a corresponding
// *TransportSocketMatch* in this field. Other client Envoys receive CDS without
// *transport_socket_match* set, and still send plain text traffic to the same cluster.
//
// This field can be used to specify custom transport socket configurations for health
// checks by adding matching key/value pairs in a health check's
// :ref:`transport socket match criteria ` field.
//
// [#comment:TODO(incfly): add a detailed architecture doc on intended usage.]
repeated TransportSocketMatch transport_socket_matches = 43;

// Supplies the name of the cluster which must be unique across all clusters.
// The cluster name is used when emitting
// :ref:`statistics ` if :ref:`alt_stat_name
// ` is not provided.
// Any ``:`` in the cluster name will be converted to ``_`` when emitting statistics.
string name = 1 [(validate.rules).string = {min_len: 1}];

// An optional alternative to the cluster name to be used while emitting stats.
// Any ``:`` in the name will be converted to ``_`` when emitting statistics. This should not be
// confused with :ref:`Router Filter Header
// `.
string alt_stat_name = 28;

oneof cluster_discovery_type {
  // The :ref:`service discovery type `
  // to use for resolving the cluster.
  DiscoveryType type = 2 [(validate.rules).enum = {defined_only: true}];

  // The custom cluster type.
  CustomClusterType cluster_type = 38;
}

// Configuration to use for EDS updates for the Cluster.
EdsClusterConfig eds_cluster_config = 3;

// The timeout for new network connections to hosts in the cluster.
google.protobuf.Duration connect_timeout = 4 [(validate.rules).duration = {gt {}}];

// Soft limit on size of the cluster’s connections read and write buffers. If
// unspecified, an implementation defined default is applied (1MiB).
google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5
    [(udpa.annotations.security).configure_for_untrusted_upstream = true];

// The :ref:`load balancer type ` to use
// when picking a host in the cluster.
// [#comment:TODO: Remove enum constraint :ref:`LOAD_BALANCING_POLICY_CONFIG` when implemented.]
LbPolicy lb_policy = 6 [(validate.rules).enum = {defined_only: true not_in: 7}];

// Setting this is required for specifying members of
// :ref:`STATIC`,
// :ref:`STRICT_DNS`
// or :ref:`LOGICAL_DNS` clusters.
// This field supersedes the *hosts* field in the v2 API.
//
// .. attention::
//
//   Setting this allows non-EDS cluster types to contain embedded EDS equivalent
//   :ref:`endpoint assignments`.
//
endpoint.v3.ClusterLoadAssignment load_assignment = 33;

// Optional :ref:`active health checking `
// configuration for the cluster. If no
// configuration is specified no health checking will be done and all cluster
// members will be considered healthy at all times.
repeated core.v4alpha.HealthCheck health_checks = 8;

// Optional maximum requests for a single upstream connection. This parameter
// is respected by both the HTTP/1.1 and HTTP/2 connection pool
// implementations. If not specified, there is no limit. Setting this
// parameter to 1 will effectively disable keep alive.
google.protobuf.UInt32Value max_requests_per_connection = 9;

// Optional :ref:`circuit breaking ` for the cluster.
CircuitBreakers circuit_breakers = 10;

// HTTP protocol options that are applied only to upstream HTTP connections.
// These options apply to all HTTP versions.
core.v4alpha.UpstreamHttpProtocolOptions upstream_http_protocol_options = 46;

// Additional options when handling HTTP requests upstream. These options will be applicable to
// both HTTP1 and HTTP2 requests.
core.v4alpha.HttpProtocolOptions common_http_protocol_options = 29;

// Additional options when handling HTTP1 requests.
core.v4alpha.Http1ProtocolOptions http_protocol_options = 13;

// Even if default HTTP2 protocol options are desired, this field must be
// set so that Envoy will assume that the upstream supports HTTP/2 when
// making new HTTP connection pool connections. Currently, Envoy only
// supports prior knowledge for upstream connections. Even if TLS is used
// with ALPN, `http2_protocol_options` must be specified. As an aside this allows HTTP/2
// connections to happen over plain text.
core.v4alpha.Http2ProtocolOptions http2_protocol_options = 14
    [(udpa.annotations.security).configure_for_untrusted_upstream = true];

// The extension_protocol_options field is used to provide extension-specific protocol options
// for upstream connections.
// The key should match the extension filter name, such as
// "envoy.filters.network.thrift_proxy". See the extension's documentation for details on
// specific options.
// NOTE(review): the map's type parameters were missing in the original text
// ("map typed_extension_protocol_options"), which is not valid proto3 syntax;
// restored to map<string, google.protobuf.Any> per the v3 Cluster definition
// named in this file's versioning annotations.
map<string, google.protobuf.Any> typed_extension_protocol_options = 36;

// If the DNS refresh rate is specified and the cluster type is either
// :ref:`STRICT_DNS`,
// or :ref:`LOGICAL_DNS`,
// this value is used as the cluster’s DNS refresh
// rate. The value configured must be at least 1ms. If this setting is not specified, the
// value defaults to 5000ms. For cluster types other than
// :ref:`STRICT_DNS`
// and :ref:`LOGICAL_DNS`
// this setting is ignored.
google.protobuf.Duration dns_refresh_rate = 16 [(validate.rules).duration = {gt {nanos: 1000000}}];

// If the DNS failure refresh rate is specified and the cluster type is either
// :ref:`STRICT_DNS`,
// or :ref:`LOGICAL_DNS`,
// this is used as the cluster’s DNS refresh rate when requests are failing. If this setting is
// not specified, the failure refresh rate defaults to the DNS refresh rate. For cluster types
// other than :ref:`STRICT_DNS` and
// :ref:`LOGICAL_DNS` this setting is
// ignored.
RefreshRate dns_failure_refresh_rate = 44;

// Optional configuration for setting cluster's DNS refresh rate. If the value is set to true,
// cluster's DNS refresh rate will be set to resource record's TTL which comes from DNS
// resolution.
bool respect_dns_ttl = 39;

// The DNS IP address resolution policy. If this setting is not specified, the
// value defaults to
// :ref:`AUTO`.
DnsLookupFamily dns_lookup_family = 17 [(validate.rules).enum = {defined_only: true}];

// If DNS resolvers are specified and the cluster type is either
// :ref:`STRICT_DNS`,
// or :ref:`LOGICAL_DNS`,
// this value is used to specify the cluster’s dns resolvers.
// If this setting is not specified, the value defaults to the default
// resolver, which uses /etc/resolv.conf for configuration. For cluster types
// other than
// :ref:`STRICT_DNS`
// and :ref:`LOGICAL_DNS`
// this setting is ignored.
// Setting this value causes failure if the
// ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
// server startup. Apple's API only allows overriding DNS resolvers via system settings.
repeated core.v4alpha.Address dns_resolvers = 18;

// [#next-major-version: Reconcile DNS options in a single message.]
// Always use TCP queries instead of UDP queries for DNS lookups.
// Setting this value causes failure if the
// ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
// server startup. Apple's API only uses UDP for DNS resolution.
bool use_tcp_for_dns_lookups = 45;

// If specified, outlier detection will be enabled for this upstream cluster.
// Each of the configuration values can be overridden via
// :ref:`runtime values `.
OutlierDetection outlier_detection = 19;

// The interval for removing stale hosts from a cluster type
// :ref:`ORIGINAL_DST`.
// Hosts are considered stale if they have not been used
// as upstream destinations during this interval. New hosts are added
// to original destination clusters on demand as new connections are
// redirected to Envoy, causing the number of hosts in the cluster to
// grow over time. Hosts that are not stale (they are actively used as
// destinations) are kept in the cluster, which allows connections to
// them remain open, saving the latency that would otherwise be spent
// on opening new connections. If this setting is not specified, the
// value defaults to 5000ms. For cluster types other than
// :ref:`ORIGINAL_DST`
// this setting is ignored.
google.protobuf.Duration cleanup_interval = 20 [(validate.rules).duration = {gt {}}];

// Optional configuration used to bind newly established upstream connections.
// This overrides any bind_config specified in the bootstrap proto.
// If the address and port are empty, no bind will be performed.
core.v4alpha.BindConfig upstream_bind_config = 21;

// Configuration for load balancing subsetting.
LbSubsetConfig lb_subset_config = 22;

// Optional configuration for the load balancing algorithm selected by
// LbPolicy. Currently only
// :ref:`RING_HASH`,
// :ref:`MAGLEV` and
// :ref:`LEAST_REQUEST`
// has additional configuration options.
// Specifying ring_hash_lb_config or maglev_lb_config or least_request_lb_config without setting the corresponding
// LbPolicy will generate an error at runtime.
oneof lb_config {
  // Optional configuration for the Ring Hash load balancing policy.
  RingHashLbConfig ring_hash_lb_config = 23;

  // Optional configuration for the Maglev load balancing policy.
  MaglevLbConfig maglev_lb_config = 52;

  // Optional configuration for the Original Destination load balancing policy.
  OriginalDstLbConfig original_dst_lb_config = 34;

  // Optional configuration for the LeastRequest load balancing policy.
  LeastRequestLbConfig least_request_lb_config = 37;
}

// Common configuration for all load balancer implementations.
CommonLbConfig common_lb_config = 27;

// Optional custom transport socket implementation to use for upstream connections.
// To setup TLS, set a transport socket with name `tls` and
// :ref:`UpstreamTlsContexts ` in the `typed_config`.
// If no transport socket configuration is specified, new connections
// will be set up with plaintext.
core.v4alpha.TransportSocket transport_socket = 24;

// The Metadata field can be used to provide additional information about the
// cluster. It can be used for stats, logging, and varying filter behavior.
// Fields should use reverse DNS notation to denote which entity within Envoy
// will need the information. For instance, if the metadata is intended for
// the Router filter, the filter name should be specified as *envoy.filters.http.router*.
core.v4alpha.Metadata metadata = 25;

// Determines how Envoy selects the protocol used to speak to upstream hosts.
ClusterProtocolSelection protocol_selection = 26;

// Optional options for upstream connections.
UpstreamConnectionOptions upstream_connection_options = 30;

// If an upstream host becomes unhealthy (as determined by the configured health checks
// or outlier detection), immediately close all connections to the failed host.
//
// .. note::
//
//   This is currently only supported for connections created by tcp_proxy.
//
// .. note::
//
//   The current implementation of this feature closes all connections immediately when
//   the unhealthy status is detected. If there are a large number of connections open
//   to an upstream host that becomes unhealthy, Envoy may spend a substantial amount of
//   time exclusively closing these connections, and not processing any other traffic.
bool close_connections_on_host_health_failure = 31;

// If set to true, Envoy will ignore the health value of a host when processing its removal
// from service discovery. This means that if active health checking is used, Envoy will *not*
// wait for the endpoint to go unhealthy before removing it.
bool ignore_health_on_host_removal = 32;

// An (optional) network filter chain, listed in the order the filters should be applied.
// The chain will be applied to all outgoing connections that Envoy makes to the upstream
// servers of this cluster.
repeated Filter filters = 40;

// [#not-implemented-hide:] New mechanism for LB policy configuration. Used only if the
// :ref:`lb_policy` field has the value
// :ref:`LOAD_BALANCING_POLICY_CONFIG`.
LoadBalancingPolicy load_balancing_policy = 41;

// [#not-implemented-hide:]
// If present, tells the client where to send load reports via LRS. If not present, the
// client will fall back to a client-side default, which may be either (a) don't send any
// load reports or (b) send load reports for all clusters to a single default server
// (which may be configured in the bootstrap file).
//
// Note that if multiple clusters point to the same LRS server, the client may choose to
// create a separate stream for each cluster or it may choose to coalesce the data for
// multiple clusters onto a single stream. Either way, the client must make sure to send
// the data for any given cluster on no more than one stream.
//
// [#next-major-version: In the v3 API, we should consider restructuring this somehow,
// maybe by allowing LRS to go on the ADS stream, or maybe by moving some of the negotiation
// from the LRS stream here.]
core.v4alpha.ConfigSource lrs_server = 42;

// Optional customization and configuration of upstream connection pool, and upstream type.
//
// Currently this field only applies for HTTP traffic but is designed for eventual use for custom
// TCP upstreams.
//
// For HTTP traffic, Envoy will generally take downstream HTTP and send it upstream as upstream
// HTTP, using the http connection pool and the codec from `http2_protocol_options`
//
// For routes where CONNECT termination is configured, Envoy will take downstream CONNECT
// requests and forward the CONNECT payload upstream over raw TCP using the tcp connection pool.
//
// The default pool used is the generic connection pool which creates the HTTP upstream for most
// HTTP requests, and the TCP upstream if CONNECT termination is configured.
//
// If users desire custom connection pool or upstream behavior, for example terminating
// CONNECT only if a custom filter indicates it is appropriate, the custom factories
// can be registered and configured here.
core.v4alpha.TypedExtensionConfig upstream_config = 48;

// Configuration to track optional cluster stats.
TrackClusterStats track_cluster_stats = 49;

// [#not-implemented-hide:]
// Prefetch configuration for this cluster.
PrefetchPolicy prefetch_policy = 50;

// If `connection_pool_per_downstream_connection` is true, the cluster will use a separate
// connection pool for every downstream connection
bool connection_pool_per_downstream_connection = 51;
}

// [#not-implemented-hide:] Extensible load balancing policy configuration.
//
// Every LB policy defined via this mechanism will be identified via a unique name using reverse
// DNS notation. If the policy needs configuration parameters, it must define a message for its
// own configuration, which will be stored in the config field. The name of the policy will tell
// clients which type of message they should expect to see in the config field.
//
// Note that there are cases where it is useful to be able to independently select LB policies
// for choosing a locality and for choosing an endpoint within that locality. For example, a
// given deployment may always use the same policy to choose the locality, but for choosing the
// endpoint within the locality, some clusters may use weighted-round-robin, while others may
// use some sort of session-based balancing.
//
// This can be accomplished via hierarchical LB policies, where the parent LB policy creates a
// child LB policy for each locality. For each request, the parent chooses the locality and then
// delegates to the child policy for that locality to choose the endpoint within the locality.
//
// To facilitate this, the config message for the top-level LB policy may include a field of
// type LoadBalancingPolicy that specifies the child policy.
message LoadBalancingPolicy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.LoadBalancingPolicy";

  message Policy {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.cluster.v3.LoadBalancingPolicy.Policy";

    reserved 2;

    reserved "config";

    // Required. The name of the LB policy.
string name = 1; google.protobuf.Any typed_config = 3; } // Each client will iterate over the list in order and stop at the first policy that it // supports. This provides a mechanism for starting to use new LB policies that are not yet // supported by all clients. repeated Policy policies = 1; } // An extensible structure containing the address Envoy should bind to when // establishing upstream connections. message UpstreamBindConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.UpstreamBindConfig"; // The address Envoy should bind to when establishing upstream connections. core.v4alpha.Address source_address = 1; } message UpstreamConnectionOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.UpstreamConnectionOptions"; // If set then set SO_KEEPALIVE on the socket to enable TCP Keepalives. core.v4alpha.TcpKeepalive tcp_keepalive = 1; } message TrackClusterStats { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.TrackClusterStats"; // If timeout_budgets is true, the :ref:`timeout budget histograms // ` will be published for each // request. These show what percentage of a request's per try and global timeout was used. A value // of 0 would indicate that none of the timeout was used or that the timeout was infinite. A value // of 100 would indicate that the request took the entirety of the timeout given to it. bool timeout_budgets = 1; // If request_response_sizes is true, then the :ref:`histograms // ` tracking header and body sizes // of requests and responses will be published. 
bool request_response_sizes = 2; } ================================================ FILE: api/envoy/config/cluster/v4alpha/filter.proto ================================================ syntax = "proto3"; package envoy.config.cluster.v4alpha; import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; option java_outer_classname = "FilterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Upstream filters] // Upstream filters apply to the connections to the upstream cluster hosts. message Filter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.v3.Filter"; // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. google.protobuf.Any typed_config = 2; } ================================================ FILE: api/envoy/config/cluster/v4alpha/outlier_detection.proto ================================================ syntax = "proto3"; package envoy.config.cluster.v4alpha; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.cluster.v4alpha"; option java_outer_classname = "OutlierDetectionProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Outlier detection] // See the :ref:`architecture overview ` for // more information on outlier detection. 
// [#next-free-field: 21]
message OutlierDetection {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.v3.OutlierDetection";

  // The number of consecutive 5xx responses or local origin errors that are mapped
  // to 5xx error codes before a consecutive 5xx ejection
  // occurs. Defaults to 5.
  google.protobuf.UInt32Value consecutive_5xx = 1;

  // The time interval between ejection analysis sweeps. This can result in
  // both new ejections as well as hosts being returned to service. Defaults
  // to 10000ms or 10s.
  google.protobuf.Duration interval = 2 [(validate.rules).duration = {gt {}}];

  // The base time that a host is ejected for. The real time is equal to the
  // base time multiplied by the number of times the host has been ejected.
  // Defaults to 30000ms or 30s.
  google.protobuf.Duration base_ejection_time = 3 [(validate.rules).duration = {gt {}}];

  // The maximum % of an upstream cluster that can be ejected due to outlier
  // detection. Defaults to 10% but will eject at least one host regardless of the value.
  google.protobuf.UInt32Value max_ejection_percent = 4 [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive 5xx. This setting can be used to disable
  // ejection or to ramp it up slowly. Defaults to 100.
  google.protobuf.UInt32Value enforcing_consecutive_5xx = 5 [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through success rate statistics. This setting can be used to
  // disable ejection or to ramp it up slowly. Defaults to 100.
  google.protobuf.UInt32Value enforcing_success_rate = 6 [(validate.rules).uint32 = {lte: 100}];

  // The number of hosts in a cluster that must have enough request volume to
  // detect success rate outliers. If the number of hosts is less than this
  // setting, outlier detection via success rate statistics is not performed
  // for any host in the cluster. Defaults to 5.
  google.protobuf.UInt32Value success_rate_minimum_hosts = 7;

  // The minimum number of total requests that must be collected in one
  // interval (as defined by the interval duration above) to include this host
  // in success rate based outlier detection. If the volume is lower than this
  // setting, outlier detection via success rate statistics is not performed
  // for that host. Defaults to 100.
  google.protobuf.UInt32Value success_rate_request_volume = 8;

  // This factor is used to determine the ejection threshold for success rate
  // outlier ejection. The ejection threshold is the difference between the
  // mean success rate, and the product of this factor and the standard
  // deviation of the mean success rate: mean - (stdev *
  // success_rate_stdev_factor). This factor is divided by a thousand to get a
  // double. That is, if the desired factor is 1.9, the runtime value should
  // be 1900. Defaults to 1900.
  google.protobuf.UInt32Value success_rate_stdev_factor = 9;

  // The number of consecutive gateway failures (502, 503, 504 status codes)
  // before a consecutive gateway failure ejection occurs. Defaults to 5.
  google.protobuf.UInt32Value consecutive_gateway_failure = 10;

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive gateway failures. This setting can be
  // used to disable ejection or to ramp it up slowly. Defaults to 0.
  google.protobuf.UInt32Value enforcing_consecutive_gateway_failure = 11
      [(validate.rules).uint32 = {lte: 100}];

  // Determines whether to distinguish local origin failures from external errors. If set to true
  // the following configuration parameters are taken into account:
  // :ref:`consecutive_local_origin_failure`,
  // :ref:`enforcing_consecutive_local_origin_failure`
  // and
  // :ref:`enforcing_local_origin_success_rate`.
  // Defaults to false.
  bool split_external_local_origin_errors = 12;

  // The number of consecutive locally originated failures before ejection
  // occurs. Defaults to 5. Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value consecutive_local_origin_failure = 13;

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through consecutive locally originated failures. This setting can be
  // used to disable ejection or to ramp it up slowly. Defaults to 100.
  // Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value enforcing_consecutive_local_origin_failure = 14
      [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status
  // is detected through success rate statistics for locally originated errors.
  // This setting can be used to disable ejection or to ramp it up slowly. Defaults to 100.
  // Parameter takes effect only when
  // :ref:`split_external_local_origin_errors`
  // is set to true.
  google.protobuf.UInt32Value enforcing_local_origin_success_rate = 15
      [(validate.rules).uint32 = {lte: 100}];

  // The failure percentage to use when determining failure percentage-based outlier detection. If
  // the failure percentage of a given host is greater than or equal to this value, it will be
  // ejected. Defaults to 85.
  google.protobuf.UInt32Value failure_percentage_threshold = 16
      [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status is detected through
  // failure percentage statistics. This setting can be used to disable ejection or to ramp it up
  // slowly. Defaults to 0.
  //
  // [#next-major-version: setting this without setting failure_percentage_threshold should be
  // invalid in v4.]
  google.protobuf.UInt32Value enforcing_failure_percentage = 17
      [(validate.rules).uint32 = {lte: 100}];

  // The % chance that a host will be actually ejected when an outlier status is detected through
  // local-origin failure percentage statistics. This setting can be used to disable ejection or to
  // ramp it up slowly. Defaults to 0.
  google.protobuf.UInt32Value enforcing_failure_percentage_local_origin = 18
      [(validate.rules).uint32 = {lte: 100}];

  // The minimum number of hosts in a cluster in order to perform failure percentage-based ejection.
  // If the total number of hosts in the cluster is less than this value, failure percentage-based
  // ejection will not be performed. Defaults to 5.
  google.protobuf.UInt32Value failure_percentage_minimum_hosts = 19;

  // The minimum number of total requests that must be collected in one interval (as defined by the
  // interval duration above) to perform failure percentage-based ejection for this host. If the
  // volume is lower than this setting, failure percentage-based ejection will not be performed for
  // this host. Defaults to 50.
  google.protobuf.UInt32Value failure_percentage_request_volume = 20;
}



================================================
FILE: api/envoy/config/common/dynamic_forward_proxy/v2alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto
================================================
syntax = "proto3";

package envoy.config.common.dynamic_forward_proxy.v2alpha;

import "envoy/api/v2/cluster.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.common.dynamic_forward_proxy.v2alpha";
option java_outer_classname = "DnsCacheProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package =
    "envoy.extensions.common.dynamic_forward_proxy.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Dynamic forward proxy common configuration]

// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview
// ` for more information.
// [#next-free-field: 7]
message DnsCacheConfig {
  // The name of the cache. Multiple named caches allow independent dynamic forward proxy
  // configurations to operate within a single Envoy process using different configurations. All
  // configurations with the same name *must* otherwise have the same settings when referenced
  // from different configuration components. Configuration will fail to load if this is not
  // the case.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // The DNS lookup family to use during resolution.
  //
  // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The
  // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and
  // then configures a host to have a primary and fall back address. With this, we could very
  // likely build a "happy eyeballs" connection pool which would race the primary / fall back
  // address and return the one that wins. This same method could potentially also be used for
  // QUIC to TCP fall back.]
  api.v2.Cluster.DnsLookupFamily dns_lookup_family = 2
      [(validate.rules).enum = {defined_only: true}];

  // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s.
  //
  // .. note:
  //
  //  The returned DNS TTL is not currently used to alter the refresh rate. This feature will be
  //  added in a future change.
  //
  // .. note:
  //
  //  The refresh rate is rounded to the closest millisecond, and must be at least 1ms.
  google.protobuf.Duration dns_refresh_rate = 3
      [(validate.rules).duration = {gte {nanos: 1000000}}];

  // The TTL for hosts that are unused. Hosts that have not been used in the configured time
  // interval will be purged. If not specified defaults to 5m.
  //
  // .. note:
  //
  //   The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This
  //   means that if the configured TTL is shorter than the refresh rate the host may not be removed
  //   immediately.
  //
  //  .. note:
  //
  //   The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage.
  google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}];

  // The maximum number of hosts that the cache will hold. If not specified defaults to 1024.
  //
  // .. note:
  //
  //   The implementation is approximate and enforced independently on each worker thread, thus
  //   it is possible for the maximum hosts in the cache to go slightly above the configured
  //   value depending on timing. This is similar to how other circuit breakers work.
  google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}];

  // If the DNS failure refresh rate is specified,
  // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is
  // not specified, the failure refresh rate defaults to the dns_refresh_rate.
  api.v2.Cluster.RefreshRate dns_failure_refresh_rate = 6;
}



================================================
FILE: api/envoy/config/common/matcher/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/route/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/config/common/matcher/v3/matcher.proto
================================================
syntax = "proto3";

package envoy.config.common.matcher.v3;

import "envoy/config/route/v3/route_components.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.common.matcher.v3";
option java_outer_classname = "MatcherProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Unified Matcher API]

// Match configuration. This is a recursive structure which allows complex nested match
// configurations to be built using various logical operators.
// [#next-free-field: 11]
message MatchPredicate {
  // A set of match configurations used for logical operations.
  message MatchSet {
    // The list of rules that make up the set.
    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];
  }

  oneof rule {
    option (validate.required) = true;

    // A set that describes a logical OR. If any member of the set matches, the match configuration
    // matches.
    MatchSet or_match = 1;

    // A set that describes a logical AND. If all members of the set match, the match configuration
    // matches.
    MatchSet and_match = 2;

    // A negation match. The match configuration will match if the negated match condition matches.
    MatchPredicate not_match = 3;

    // The match configuration will always match.
    bool any_match = 4 [(validate.rules).bool = {const: true}];

    // HTTP request headers match configuration.
    HttpHeadersMatch http_request_headers_match = 5;

    // HTTP request trailers match configuration.
    HttpHeadersMatch http_request_trailers_match = 6;

    // HTTP response headers match configuration.
    HttpHeadersMatch http_response_headers_match = 7;

    // HTTP response trailers match configuration.
    HttpHeadersMatch http_response_trailers_match = 8;

    // HTTP request generic body match configuration.
    HttpGenericBodyMatch http_request_generic_body_match = 9;

    // HTTP response generic body match configuration.
    HttpGenericBodyMatch http_response_generic_body_match = 10;
  }
}

// HTTP headers match configuration.
message HttpHeadersMatch {
  // HTTP headers to match.
  repeated route.v3.HeaderMatcher headers = 1;
}

// HTTP generic body match configuration.
// List of text strings and hex strings to be located in HTTP body.
// All specified strings must be found in the HTTP body for positive match.
// The search may be limited to specified number of bytes from the body start.
//
// .. attention::
//
//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.
//   If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified
//   to scan only part of the http body.
message HttpGenericBodyMatch {
  message GenericTextMatch {
    oneof rule {
      option (validate.required) = true;

      // Text string to be located in HTTP body.
      string string_match = 1 [(validate.rules).string = {min_len: 1}];

      // Sequence of bytes to be located in HTTP body.
      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];
    }
  }

  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).
  uint32 bytes_limit = 1;

  // List of patterns to match.
  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];
}



================================================
FILE: api/envoy/config/common/matcher/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/common/matcher/v3:pkg",
        "//envoy/config/route/v4alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/config/common/matcher/v4alpha/matcher.proto
================================================
syntax = "proto3";

package envoy.config.common.matcher.v4alpha;

import "envoy/config/route/v4alpha/route_components.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.common.matcher.v4alpha";
option java_outer_classname = "MatcherProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Unified Matcher API]

// Match configuration. This is a recursive structure which allows complex nested match
// configurations to be built using various logical operators.
// [#next-free-field: 11]
message MatchPredicate {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.common.matcher.v3.MatchPredicate";

  // A set of match configurations used for logical operations.
  message MatchSet {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.common.matcher.v3.MatchPredicate.MatchSet";

    // The list of rules that make up the set.
    repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}];
  }

  oneof rule {
    option (validate.required) = true;

    // A set that describes a logical OR. If any member of the set matches, the match configuration
    // matches.
    MatchSet or_match = 1;

    // A set that describes a logical AND. If all members of the set match, the match configuration
    // matches.
    MatchSet and_match = 2;

    // A negation match. The match configuration will match if the negated match condition matches.
    MatchPredicate not_match = 3;

    // The match configuration will always match.
    bool any_match = 4 [(validate.rules).bool = {const: true}];

    // HTTP request headers match configuration.
    HttpHeadersMatch http_request_headers_match = 5;

    // HTTP request trailers match configuration.
    HttpHeadersMatch http_request_trailers_match = 6;

    // HTTP response headers match configuration.
    HttpHeadersMatch http_response_headers_match = 7;

    // HTTP response trailers match configuration.
    HttpHeadersMatch http_response_trailers_match = 8;

    // HTTP request generic body match configuration.
    HttpGenericBodyMatch http_request_generic_body_match = 9;

    // HTTP response generic body match configuration.
    HttpGenericBodyMatch http_response_generic_body_match = 10;
  }
}

// HTTP headers match configuration.
message HttpHeadersMatch {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.common.matcher.v3.HttpHeadersMatch";

  // HTTP headers to match.
  repeated route.v4alpha.HeaderMatcher headers = 1;
}

// HTTP generic body match configuration.
// List of text strings and hex strings to be located in HTTP body.
// All specified strings must be found in the HTTP body for positive match.
// The search may be limited to specified number of bytes from the body start.
//
// .. attention::
//
//   Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match.
//   If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified
//   to scan only part of the http body.
message HttpGenericBodyMatch {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.common.matcher.v3.HttpGenericBodyMatch";

  message GenericTextMatch {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.common.matcher.v3.HttpGenericBodyMatch.GenericTextMatch";

    oneof rule {
      option (validate.required) = true;

      // Text string to be located in HTTP body.
      string string_match = 1 [(validate.rules).string = {min_len: 1}];

      // Sequence of bytes to be located in HTTP body.
      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];
    }
  }

  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).
  uint32 bytes_limit = 1;

  // List of patterns to match.
  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];
}



================================================
FILE: api/envoy/config/common/tap/v2alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/service/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/common/tap/v2alpha/common.proto ================================================ syntax = "proto3"; package envoy.config.common.tap.v2alpha; import "envoy/api/v2/core/config_source.proto"; import "envoy/service/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.common.tap.v2alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.common.tap.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common tap extension configuration] // Common configuration for all tap extensions. message CommonExtensionConfig { // [#not-implemented-hide:] message TapDSConfig { // Configuration for the source of TapDS updates for this Cluster. api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; // Tap config to request from XDS server. string name = 2 [(validate.rules).string = {min_bytes: 1}]; } oneof config_type { option (validate.required) = true; // If specified, the tap filter will be configured via an admin handler. AdminConfig admin_config = 1; // If specified, the tap filter will be configured via a static configuration that cannot be // changed. service.tap.v2alpha.TapConfig static_config = 2; // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter. TapDSConfig tapds_config = 3; } } // Configuration for the admin handler. See :ref:`here ` for // more information. message AdminConfig { // Opaque configuration ID. 
When requests are made to the admin handler, the passed opaque ID is // matched to the configured filter opaque ID to determine which filter to configure. string config_id = 1 [(validate.rules).string = {min_bytes: 1}]; } ================================================ FILE: api/envoy/config/core/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2/core:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/config/core/v3/address.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/socket_option.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "AddressProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Network addresses] message Pipe { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Pipe"; // Unix Domain Socket path. On Linux, paths starting with '@' will use the // abstract namespace. The starting '@' is replaced by a null byte by Envoy. // Paths starting with '@' will result in an error in environments other than // Linux. string path = 1 [(validate.rules).string = {min_len: 1}]; // The mode for the Pipe. Not applicable for abstract sockets. 
uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}]; } // [#not-implemented-hide:] The address represents an envoy internal listener. // TODO(lambdai): Make this address available for listener and endpoint. // TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30. message EnvoyInternalAddress { oneof address_name_specifier { option (validate.required) = true; // [#not-implemented-hide:] The :ref:`listener name ` of the destination internal listener. string server_listener_name = 1; } } // [#next-free-field: 7] message SocketAddress { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketAddress"; enum Protocol { TCP = 0; UDP = 1; } Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}]; // The address for this socket. :ref:`Listeners ` will bind // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::`` // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented: // It is possible to distinguish a Listener address via the prefix/suffix matching // in :ref:`FilterChainMatch `.] When used // within an upstream :ref:`BindConfig `, the address // controls the source address of outbound connections. For :ref:`clusters // `, the cluster type determines whether the // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized // via :ref:`resolver_name `. string address = 2 [(validate.rules).string = {min_len: 1}]; oneof port_specifier { option (validate.required) = true; uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}]; // This is only valid if :ref:`resolver_name // ` is specified below and the // named resolver is capable of named port resolution. string named_port = 4; } // The name of the custom resolver. This must have been registered with Envoy. If // this is empty, a context dependent default applies. 
If the address is a concrete // IP address, no resolution will occur. If address is a hostname this // should be set for resolution other than DNS. Specifying a custom resolver with // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime. string resolver_name = 5; // When binding to an IPv6 address above, this enables `IPv4 compatibility // `_. Binding to ``::`` will // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into // IPv6 space as ``::FFFF:``. bool ipv4_compat = 6; } message TcpKeepalive { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TcpKeepalive"; // Maximum number of keepalive probes to send without response before deciding // the connection is dead. Default is to use the OS level configuration (unless // overridden, Linux defaults to 9.) google.protobuf.UInt32Value keepalive_probes = 1; // The number of seconds a connection needs to be idle before keep-alive probes // start being sent. Default is to use the OS level configuration (unless // overridden, Linux defaults to 7200s (i.e., 2 hours.) google.protobuf.UInt32Value keepalive_time = 2; // The number of seconds between keep-alive probes. Default is to use the OS // level configuration (unless overridden, Linux defaults to 75s.) google.protobuf.UInt32Value keepalive_interval = 3; } message BindConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BindConfig"; // The address to bind to when creating a socket. SocketAddress source_address = 1 [(validate.rules).message = {required: true}]; // Whether to set the *IP_FREEBIND* option when creating the socket. When this // flag is set to true, allows the :ref:`source_address // ` to be an IP address // that is not configured on the system running Envoy. When this flag is set // to false, the option *IP_FREEBIND* is disabled on the socket. When this // flag is not set (default), the socket is not modified, i.e. the option is // neither enabled nor disabled. 
google.protobuf.BoolValue freebind = 2;

  // Additional socket options that may not be present in Envoy source code or
  // precompiled binaries.
  repeated SocketOption socket_options = 3;
}

// Addresses specify either a logical or physical address and port, which are
// used to tell Envoy where to bind/listen, connect to upstream and find
// management servers.
message Address {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Address";

  // Exactly one address kind must be set.
  oneof address {
    option (validate.required) = true;

    SocketAddress socket_address = 1;

    Pipe pipe = 2;

    // [#not-implemented-hide:]
    EnvoyInternalAddress envoy_internal_address = 3;
  }
}

// CidrRange specifies an IP Address and a prefix length to construct
// the subnet mask for a `CIDR `_ range.
message CidrRange {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.CidrRange";

  // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.
  string address_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // Length of prefix, e.g. 0, 32. Capped at 128, the IPv6 maximum.
  google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];
}

================================================
FILE: api/envoy/config/core/v3/backoff.proto
================================================
syntax = "proto3";

package envoy.config.core.v3;

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v3";
option java_outer_classname = "BackoffProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Backoff Strategy]

// Configuration defining a jittered exponential back off strategy.
message BackoffStrategy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BackoffStrategy";

  // The base interval to be used for the next back off computation.
It should // be greater than zero and less than or equal to :ref:`max_interval // `. google.protobuf.Duration base_interval = 1 [(validate.rules).duration = { required: true gte {nanos: 1000000} }]; // Specifies the maximum interval between retries. This parameter is optional, // but must be greater than or equal to the :ref:`base_interval // ` if set. The default // is 10 times the :ref:`base_interval // `. google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}]; } ================================================ FILE: api/envoy/config/core/v3/base.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/backoff.proto"; import "envoy/config/core/v3/http_uri.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/semantic_version.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "BaseProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common types] // Envoy supports :ref:`upstream priority routing // ` both at the route and the virtual // cluster level. The current priority implementation uses different connection // pool and circuit breaking settings for each priority level. This means that // even for HTTP/2 requests, two physical connections will be used to an // upstream host. In the future Envoy will likely support true HTTP/2 priority // over a single upstream connection. enum RoutingPriority { DEFAULT = 0; HIGH = 1; } // HTTP request method. 
enum RequestMethod {
  METHOD_UNSPECIFIED = 0;
  GET = 1;
  HEAD = 2;
  POST = 3;
  PUT = 4;
  DELETE = 5;
  CONNECT = 6;
  OPTIONS = 7;
  TRACE = 8;
  PATCH = 9;
}

// Identifies the direction of the traffic relative to the local Envoy.
enum TrafficDirection {
  // Default option is unspecified.
  UNSPECIFIED = 0;

  // The transport is used for incoming traffic.
  INBOUND = 1;

  // The transport is used for outgoing traffic.
  OUTBOUND = 2;
}

// Identifies location of where either Envoy runs or where upstream hosts run.
message Locality {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Locality";

  // Region this :ref:`zone ` belongs to.
  string region = 1;

  // Defines the local service zone where Envoy is running. Though optional, it
  // should be set if discovery service routing is used and the discovery
  // service exposes :ref:`zone data `,
  // either in this message or via :option:`--service-zone`. The meaning of zone
  // is context dependent, e.g. `Availability Zone (AZ)
  // `_
  // on AWS, `Zone `_ on
  // GCP, etc.
  string zone = 2;

  // When used for locality of upstream hosts, this field further splits zone
  // into smaller chunks of sub-zones so they can be load balanced
  // independently.
  string sub_zone = 3;
}

// BuildVersion combines SemVer version of extension with free-form build information
// (i.e. 'alpha', 'private-build') as a set of strings.
message BuildVersion {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.BuildVersion";

  // SemVer version of extension.
  type.v3.SemanticVersion version = 1;

  // Free-form build information.
  // Envoy defines several well known keys in the source/common/version/version.h file
  google.protobuf.Struct metadata = 2;
}

// Version and identification for an Envoy extension.
// [#next-free-field: 6]
message Extension {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Extension";

  // This is the name of the Envoy filter as specified in the Envoy
  // configuration, e.g.
envoy.filters.http.router, com.acme.widget. string name = 1; // Category of the extension. // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from // acme.com vendor. // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] string category = 2; // [#not-implemented-hide:] Type descriptor of extension configuration proto. // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] string type_descriptor = 3; // The version is a property of the extension and maintained independently // of other extensions and the Envoy API. // This field is not set when extension did not provide version information. BuildVersion version = 4; // Indicates that the extension is present but was disabled via dynamic configuration. bool disabled = 5; } // Identifies a specific Envoy instance. The node identifier is presented to the // management server, which may use this identifier to distinguish per Envoy // configuration for serving. // [#next-free-field: 12] message Node { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Node"; reserved 5; reserved "build_version"; // An opaque node identifier for the Envoy node. This also provides the local // service node name. It should be set if any of the following features are // used: :ref:`statsd `, :ref:`CDS // `, and :ref:`HTTP tracing // `, either in this message or via // :option:`--service-node`. string id = 1; // Defines the local service cluster name where Envoy is running. 
Though // optional, it should be set if any of the following features are used: // :ref:`statsd `, :ref:`health check cluster // verification // `, // :ref:`runtime override directory `, // :ref:`user agent addition // `, // :ref:`HTTP global rate limiting `, // :ref:`CDS `, and :ref:`HTTP tracing // `, either in this message or via // :option:`--service-cluster`. string cluster = 2; // Opaque metadata extending the node identifier. Envoy will pass this // directly to the management server. google.protobuf.Struct metadata = 3; // Locality specifying where the Envoy instance is running. Locality locality = 4; // Free-form string that identifies the entity requesting config. // E.g. "envoy" or "grpc" string user_agent_name = 6; oneof user_agent_version_type { // Free-form string that identifies the version of the entity requesting config. // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" string user_agent_version = 7; // Structured version of the entity requesting config. BuildVersion user_agent_build_version = 8; } // List of extensions and their versions supported by the node. repeated Extension extensions = 9; // Client feature support list. These are well known features described // in the Envoy API repository for a given major version of an API. Client features // use reverse DNS naming scheme, for example `com.acme.feature`. // See :ref:`the list of features ` that xDS client may // support. repeated string client_features = 10; // Known listening ports on the node as a generic hint to the management server // for filtering :ref:`listeners ` to be returned. For example, // if there is a listener bound to port 80, the list can optionally contain the // SocketAddress `(0.0.0.0,80)`. The field is optional and just a hint. repeated Address listening_addresses = 11 [deprecated = true]; } // Metadata provides additional inputs to filters based on matched listeners, // filter chains, routes and endpoints. 
// It is structured as a map, usually from
// filter name (in reverse DNS format) to metadata specific to the filter. Metadata
// key-values for a filter are merged as connection and request handling occurs,
// with later values for the same key overriding earlier values.
//
// An example use of metadata is providing additional values to
// http_connection_manager in the envoy.http_connection_manager.access_log
// namespace.
//
// Another example use of metadata is to pass per-service config info in cluster metadata,
// which may get consumed by multiple filters.
//
// For load balancing, Metadata provides a means to subset cluster endpoints.
// Endpoints have a Metadata object associated and routes contain a Metadata
// object to match against. There are some well defined metadata used today for
// this purpose:
//
// * ``{"envoy.lb": {"canary": <bool>}}`` This indicates the canary status of an
//   endpoint and is also used during header processing
//   (x-envoy-upstream-canary) and for stats purposes.
// [#next-major-version: move to type/metadata/v2]
message Metadata {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.Metadata";

  // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*
  // namespace is reserved for Envoy's built-in filters.
  // NOTE(review): the extraction had dropped the map's type parameters
  // (``map filter_metadata``), which is invalid proto3; restored to the
  // v2 Metadata signature ``map<string, google.protobuf.Struct>``.
  map<string, google.protobuf.Struct> filter_metadata = 1;
}

// Runtime derived uint32 with a default when not specified.
message RuntimeUInt32 {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeUInt32";

  // Default value if runtime value is not available.
  uint32 default_value = 2;

  // Runtime key to get value for comparison. This value is used if defined.
  string runtime_key = 3 [(validate.rules).string = {min_len: 1}];
}

// Runtime derived percentage with a default when not specified.
message RuntimePercent {
  // Default value if runtime value is not available.
  type.v3.Percent default_value = 1;

  // Runtime key to get value for comparison. This value is used if defined.
string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}

// Runtime derived double with a default when not specified.
message RuntimeDouble {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RuntimeDouble";

  // Default value if runtime value is not available.
  double default_value = 1;

  // Runtime key to get value for comparison. This value is used if defined.
  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}

// Runtime derived bool with a default when not specified.
message RuntimeFeatureFlag {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.core.RuntimeFeatureFlag";

  // Default value if runtime value is not available.
  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];

  // Runtime key to get value for comparison. This value is used if defined. The boolean value must
  // be represented via its
  // `canonical JSON encoding `_.
  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}

// Header name/value pair.
message HeaderValue {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderValue";

  // Header name.
  string key = 1
      [(validate.rules).string =
           {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];

  // Header value.
  //
  // The same :ref:`format specifier ` as used for
  // :ref:`HTTP access logging ` applies here, however
  // unknown header values are replaced with the empty string instead of `-`.
  string value = 2 [
    (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}
  ];
}

// Header name/value pair plus option to control append behavior.
message HeaderValueOption {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.core.HeaderValueOption";

  // Header name/value pair that this option applies to.
  HeaderValue header = 1 [(validate.rules).message = {required: true}];

  // Should the value be appended?
// If true (default), the value is appended to
// existing values. Otherwise it replaces any existing values.
google.protobuf.BoolValue append = 2;
}

// Wrapper for a set of headers.
message HeaderMap {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HeaderMap";

  repeated HeaderValue headers = 1;
}

// Data source consisting of either a file or an inline value.
message DataSource {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.DataSource";

  // Exactly one source kind must be set.
  oneof specifier {
    option (validate.required) = true;

    // Local filesystem data source.
    string filename = 1 [(validate.rules).string = {min_len: 1}];

    // Bytes inlined in the configuration.
    bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];

    // String inlined in the configuration.
    string inline_string = 3 [(validate.rules).string = {min_len: 1}];
  }
}

// The message specifies the retry policy of remote data source when fetching fails.
message RetryPolicy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RetryPolicy";

  // Specifies parameters that control :ref:`retry backoff strategy `.
  // This parameter is optional, in which case the default base interval is 1000 milliseconds. The
  // default maximum interval is 10 times the base interval.
  BackoffStrategy retry_back_off = 1;

  // Specifies the allowed number of retries. This parameter is optional and
  // defaults to 1.
  google.protobuf.UInt32Value num_retries = 2
      [(udpa.annotations.field_migrate).rename = "max_retries"];
}

// The message specifies how to fetch data from remote and how to verify it.
message RemoteDataSource {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.core.RemoteDataSource";

  // The HTTP URI to fetch the remote data.
  HttpUri http_uri = 1 [(validate.rules).message = {required: true}];

  // SHA256 string for verifying data.
  string sha256 = 2 [(validate.rules).string = {min_len: 1}];

  // Retry policy for fetching remote data.
RetryPolicy retry_policy = 3;
}

// Async data source which support async data fetch.
message AsyncDataSource {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.AsyncDataSource";

  // Exactly one source kind must be set.
  oneof specifier {
    option (validate.required) = true;

    // Local async data source.
    DataSource local = 1;

    // Remote async data source.
    RemoteDataSource remote = 2;
  }
}

// Configuration for transport socket in :ref:`listeners ` and
// :ref:`clusters `. If the configuration is
// empty, a default transport socket implementation and configuration will be
// chosen based on the platform and existence of tls_context.
message TransportSocket {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TransportSocket";

  // Field 2 previously carried the untyped `config`; kept reserved so the
  // number/name are never reused.
  reserved 2;

  reserved "config";

  // The name of the transport socket to instantiate. The name must match a supported transport
  // socket implementation.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // Implementation specific configuration which depends on the implementation being instantiated.
  // See the supported transport socket implementations for further documentation.
  oneof config_type {
    google.protobuf.Any typed_config = 3;
  }
}

// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not
// specified via a runtime key.
//
// .. note::
//
//   Parsing of the runtime key's data is implemented such that it may be represented as a
//   :ref:`FractionalPercent ` proto represented as JSON/YAML
//   and may also be represented as an integer with the assumption that the value is an integral
//   percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse
//   as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.
message RuntimeFractionalPercent {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.core.RuntimeFractionalPercent";

  // Default value if the runtime value's for the numerator/denominator keys are not available.
type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}]; // Runtime key for a YAML representation of a FractionalPercent. string runtime_key = 2; } // Identifies a specific ControlPlane instance that Envoy is connected to. message ControlPlane { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ControlPlane"; // An opaque control plane identifier that uniquely identifies an instance // of control plane. This can be used to identify which control plane instance, // the Envoy is connected to. string identifier = 1; } ================================================ FILE: api/envoy/config/core/v3/config_source.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/core/v1/authority.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "ConfigSourceProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Configuration sources] // xDS API and non-xDS services version. This is used to describe both resource and transport // protocol versions (in distinct configuration fields). enum ApiVersion { // When not specified, we assume v2, to ease migration to Envoy's stable API // versioning. If a client does not support v2 (e.g. due to deprecation), this // is an invalid value. AUTO = 0; // Use xDS v2 API. V2 = 1; // Use xDS v3 API. V3 = 2; } // API configuration source. This identifies the API type and cluster that Envoy // will use to fetch an xDS API. 
// [#next-free-field: 9]
message ApiConfigSource {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ApiConfigSource";

  // APIs may be fetched via either REST or gRPC.
  enum ApiType {
    // Ideally this would be 'reserved 0' but one can't reserve the default
    // value. Instead we throw an exception if this is ever used.
    DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0
        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];

    // REST-JSON v2 API. The `canonical JSON encoding
    // `_ for
    // the v2 protos is used.
    REST = 1;

    // SotW gRPC service.
    GRPC = 2;

    // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}
    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state
    // with every update, the xDS server only sends what has changed since the last update.
    DELTA_GRPC = 3;

    // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be
    // multiplexed on a single connection to an ADS endpoint.
    // [#not-implemented-hide:]
    AGGREGATED_GRPC = 5;

    // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be
    // multiplexed on a single connection to an ADS endpoint.
    // [#not-implemented-hide:]
    AGGREGATED_DELTA_GRPC = 6;
  }

  // API type (gRPC, REST, delta gRPC)
  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];

  // API version for xDS transport protocol. This describes the xDS gRPC/REST
  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.
  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];

  // Cluster names should be used only with REST. If > 1
  // cluster is defined, clusters will be cycled through if any kind of failure
  // occurs.
  //
  // .. note::
  //
  //  The cluster with name ``cluster_name`` must be statically defined and its
  //  type must not be ``EDS``.
  repeated string cluster_names = 2;

  // Multiple gRPC services can be provided for GRPC.
If > 1 cluster is defined, // services will be cycled through if any kind of failure occurs. repeated GrpcService grpc_services = 4; // For REST APIs, the delay between successive polls. google.protobuf.Duration refresh_delay = 3; // For REST APIs, the request timeout. If not set, a default value of 1s will be used. google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}]; // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be // rate limited. RateLimitSettings rate_limit_settings = 6; // Skip the node identifier in subsequent discovery requests for streaming gRPC config types. bool set_node_on_first_message_only = 7; } // Aggregated Discovery Service (ADS) options. This is currently empty, but when // set in :ref:`ConfigSource ` can be used to // specify that ADS is to be used. message AggregatedConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.AggregatedConfigSource"; } // [#not-implemented-hide:] // Self-referencing config source options. This is currently empty, but when // set in :ref:`ConfigSource ` can be used to // specify that other data can be obtained from the same server. message SelfConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SelfConfigSource"; // API version for xDS transport protocol. This describes the xDS gRPC/REST // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire. ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}]; } // Rate Limit settings to be applied for discovery requests made by Envoy. message RateLimitSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.RateLimitSettings"; // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a // default value of 100 will be used. 
google.protobuf.UInt32Value max_tokens = 1; // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens // per second will be used. google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}]; } // Configuration for :ref:`listeners `, :ref:`clusters // `, :ref:`routes // `, :ref:`endpoints // ` etc. may either be sourced from the // filesystem or from an xDS API source. Filesystem configs are watched with // inotify for updates. // [#next-free-field: 8] message ConfigSource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.ConfigSource"; // Authorities that this config source may be used for. An authority specified // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior // to configuration fetch. This field provides the association between // authority name and configuration source. // [#not-implemented-hide:] repeated udpa.core.v1.Authority authorities = 7; oneof config_source_specifier { option (validate.required) = true; // Path on the filesystem to source and watch for configuration updates. // When sourcing configuration for :ref:`secret `, // the certificate and key files are also watched for updates. // // .. note:: // // The path to the source must exist at config load time. // // .. note:: // // Envoy will only watch the file path for *moves.* This is because in general only moves // are atomic. The same method of swapping files as is demonstrated in the // :ref:`runtime documentation ` can be used here also. string path = 1; // API configuration source. ApiConfigSource api_config_source = 2; // When set, ADS will be used to fetch resources. The ADS API configuration // source in the bootstrap configuration is used. AggregatedConfigSource ads = 3; // [#not-implemented-hide:] // When set, the client will access the resources from the same server it got the // ConfigSource from, although not necessarily from the same stream. 
This is similar to the // :ref:`ads` field, except that the client may use a // different stream to the same server. As a result, this field can be used for things // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.) // LDS to RDS on the same server without requiring the management server to know its name // or required credentials. // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since // this field can implicitly mean to use the same stream in the case where the ConfigSource // is provided via ADS and the specified data can also be obtained via ADS.] SelfConfigSource self = 5; } // When this timeout is specified, Envoy will wait no longer than the specified time for first // config response on this xDS subscription during the :ref:`initialization process // `. After reaching the timeout, Envoy will move to the next // initialization phase, even if the first config is not delivered yet. The timer is activated // when the xDS API subscription starts, and is disarmed on first config update or on error. 0 // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another // timeout applies). The default is 15s. google.protobuf.Duration initial_fetch_timeout = 4; // API version for xDS resources. This implies the type URLs that the client // will request for resources and the resource type that the client will in // turn expect to be delivered. 
ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/config/core/v3/event_service_config.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "EventServiceConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#not-implemented-hide:] // Configuration of the event reporting service endpoint. message EventServiceConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.EventServiceConfig"; oneof config_source_specifier { option (validate.required) = true; // Specifies the gRPC service that hosts the event reporting service. GrpcService grpc_service = 1; } } ================================================ FILE: api/envoy/config/core/v3/extension.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/config_source.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "ExtensionProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Extension configuration] // Message type for extension configuration. // [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.]. message TypedExtensionConfig { // The name of an extension. This is not used to select the extension, instead // it serves the role of an opaque identifier. 
string name = 1 [(validate.rules).string = {min_len: 1}];

  // The typed config for the extension. The type URL will be used to identify
  // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*,
  // the inner type URL of *TypedStruct* will be utilized. See the
  // :ref:`extension configuration overview
  // ` for further details.
  google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}];
}

// Configuration source specifier for a late-bound extension configuration. The
// parent resource is warmed until all the initial extension configurations are
// received, unless the flag to apply the default configuration is set.
// Subsequent extension updates are atomic on a per-worker basis. Once an
// extension configuration is applied to a request or a connection, it remains
// constant for the duration of processing. If the initial delivery of the
// extension configuration fails, due to a timeout for example, the optional
// default configuration is applied. Without a default configuration, the
// extension is disabled, until an extension configuration is received. The
// behavior of a disabled extension depends on the context. For example, a
// filter chain with a disabled extension filter rejects all incoming streams.
message ExtensionConfigSource {
  // Configuration source from which the extension configuration is fetched.
  // NOTE(review): this field previously used ``(validate.rules).any`` although
  // its type is the message ``ConfigSource``, not ``google.protobuf.Any``;
  // switched to ``(validate.rules).message`` to match the rule set every other
  // required message field in this package uses (same validation effect:
  // the field must be present).
  ConfigSource config_source = 1 [(validate.rules).message = {required: true}];

  // Optional default configuration to use as the initial configuration if
  // there is a failure to receive the initial extension configuration or if
  // `apply_default_config_without_warming` flag is set.
  google.protobuf.Any default_config = 2;

  // Use the default config as the initial configuration without warming and
  // waiting for the first discovery response. Requires the default configuration
  // to be supplied.
  bool apply_default_config_without_warming = 3;

  // A set of permitted extension type URLs. Extension configuration updates are rejected
  // if they do not match any type URL in the set.
repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/config/core/v3/grpc_method_list.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "GrpcMethodListProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC method list] // A list of gRPC methods which can be used as an allowlist, for example. message GrpcMethodList { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcMethodList"; message Service { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcMethodList.Service"; // The name of the gRPC service. string name = 1 [(validate.rules).string = {min_len: 1}]; // The names of the gRPC methods in this service. 
repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}]; } repeated Service services = 1; } ================================================ FILE: api/envoy/config/core/v3/grpc_service.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "GrpcServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC services] // gRPC service configuration. This is used by :ref:`ApiConfigSource // ` and filter configurations. // [#next-free-field: 6] message GrpcService { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService"; message EnvoyGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.EnvoyGrpc"; // The name of the upstream gRPC cluster. SSL credentials will be supplied // in the :ref:`Cluster ` :ref:`transport_socket // `. string cluster_name = 1 [(validate.rules).string = {min_len: 1}]; // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`. // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster. 
string authority = 2 [(validate.rules).string = {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // [#next-free-field: 9] message GoogleGrpc { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc"; // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html. message SslCredentials { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc.SslCredentials"; // PEM encoded server root certificates. DataSource root_certs = 1; // PEM encoded client private key. DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; // PEM encoded client certificate chain. DataSource cert_chain = 3; } // Local channel credentials. Only UDS is supported for now. // See https://github.com/grpc/grpc/pull/15909. message GoogleLocalCredentials { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc.GoogleLocalCredentials"; } // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call // credential types. message ChannelCredentials { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc.ChannelCredentials"; oneof credential_specifier { option (validate.required) = true; SslCredentials ssl_credentials = 1; // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 google.protobuf.Empty google_default = 2; GoogleLocalCredentials local_credentials = 3; } } // [#next-free-field: 8] message CallCredentials { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials"; message ServiceAccountJWTAccessCredentials { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials." 
"ServiceAccountJWTAccessCredentials"; string json_key = 1; uint64 token_lifetime_seconds = 2; } message GoogleIAMCredentials { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials"; string authorization_token = 1; string authority_selector = 2; } message MetadataCredentialsFromPlugin { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials." "MetadataCredentialsFromPlugin"; reserved 2; reserved "config"; string name = 1; oneof config_type { google.protobuf.Any typed_config = 3; } } // Security token service configuration that allows Google gRPC to // fetch security token from an OAuth 2.0 authorization server. // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and // https://github.com/grpc/grpc/pull/19587. // [#next-free-field: 10] message StsService { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.GrpcService.GoogleGrpc.CallCredentials.StsService"; // URI of the token exchange service that handles token exchange requests. // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by // https://github.com/envoyproxy/protoc-gen-validate/issues/303] string token_exchange_service_uri = 1; // Location of the target service or resource where the client // intends to use the requested security token. string resource = 2; // Logical name of the target service where the client intends to // use the requested security token. string audience = 3; // The desired scope of the requested security token in the // context of the service or resource where the token will be used. string scope = 4; // Type of the requested security token. string requested_token_type = 5; // The path of subject token, a security token that represents the // identity of the party on behalf of whom the request is being made. 
string subject_token_path = 6 [(validate.rules).string = {min_len: 1}]; // Type of the subject token. string subject_token_type = 7 [(validate.rules).string = {min_len: 1}]; // The path of actor token, a security token that represents the identity // of the acting party. The acting party is authorized to use the // requested security token and act on behalf of the subject. string actor_token_path = 8; // Type of the actor token. string actor_token_type = 9; } oneof credential_specifier { option (validate.required) = true; // Access token credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d. string access_token = 1; // Google Compute Engine credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61 google.protobuf.Empty google_compute_engine = 2; // Google refresh token credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c. string google_refresh_token = 3; // Service Account JWT Access credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa. ServiceAccountJWTAccessCredentials service_account_jwt_access = 4; // Google IAM credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0. GoogleIAMCredentials google_iam = 5; // Custom authenticator credentials. // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07. // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms. MetadataCredentialsFromPlugin from_plugin = 6; // Custom security token service which implements OAuth 2.0 token exchange. // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 // See https://github.com/grpc/grpc/pull/19587. StsService sts_service = 7; } } // Channel arguments. message ChannelArgs { message Value { // Pointer values are not supported, since they don't make any sense when // delivered via the API. 
oneof value_specifier { option (validate.required) = true; string string_value = 1; int64 int_value = 2; } } // See grpc_types.h GRPC_ARG #defines for keys that work here. map args = 1; } // The target URI when using the `Google C++ gRPC client // `_. SSL credentials will be supplied in // :ref:`channel_credentials `. string target_uri = 1 [(validate.rules).string = {min_len: 1}]; ChannelCredentials channel_credentials = 2; // A set of call credentials that can be composed with `channel credentials // `_. repeated CallCredentials call_credentials = 3; // The human readable prefix to use when emitting statistics for the gRPC // service. // // .. csv-table:: // :header: Name, Type, Description // :widths: 1, 1, 2 // // streams_total, Counter, Total number of streams opened // streams_closed_, Counter, Total streams closed with string stat_prefix = 4 [(validate.rules).string = {min_len: 1}]; // The name of the Google gRPC credentials factory to use. This must have been registered with // Envoy. If this is empty, a default credentials factory will be used that sets up channel // credentials based on other configuration parameters. string credentials_factory_name = 5; // Additional configuration for site-specific customizations of the Google // gRPC library. google.protobuf.Struct config = 6; // How many bytes each stream can buffer internally. // If not set an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7; // Custom channels args. ChannelArgs channel_args = 8; } reserved 4; oneof target_specifier { option (validate.required) = true; // Envoy's in-built gRPC client. // See the :ref:`gRPC services overview ` // documentation for discussion on gRPC client selection. EnvoyGrpc envoy_grpc = 1; // `Google C++ gRPC client `_ // See the :ref:`gRPC services overview ` // documentation for discussion on gRPC client selection. GoogleGrpc google_grpc = 2; } // The timeout for the gRPC request. 
This is the timeout for a specific // request. google.protobuf.Duration timeout = 3; // Additional metadata to include in streams initiated to the GrpcService. // This can be used for scenarios in which additional ad hoc authorization // headers (e.g. ``x-foo-bar: baz-key``) are to be injected. repeated HeaderValue initial_metadata = 5; } ================================================ FILE: api/envoy/config/core/v3/health_check.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/event_service_config.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http.proto"; import "envoy/type/v3/range.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health check] // * Health checking :ref:`architecture overview `. // * If health checking is configured for a cluster, additional statistics are emitted. They are // documented :ref:`here `. // Endpoint health status. enum HealthStatus { // The health status is not known. This is interpreted by Envoy as *HEALTHY*. UNKNOWN = 0; // Healthy. HEALTHY = 1; // Unhealthy. UNHEALTHY = 2; // Connection draining in progress. E.g., // ``_ // or // ``_. // This is interpreted by Envoy as *UNHEALTHY*. DRAINING = 3; // Health check timed out. This is part of HDS and is interpreted by Envoy as // *UNHEALTHY*. TIMEOUT = 4; // Degraded. 
DEGRADED = 5; } // [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck"; // Describes the encoding of the payload bytes in the payload. message Payload { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.Payload"; oneof payload { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". string text = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; } } // [#next-free-field: 12] message HttpHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.HttpHealthCheck"; reserved 5, 7; reserved "service_name", "use_http2"; // The value of the host header in the HTTP health check request. If // left empty (default value), the name of the cluster this health check is associated // with will be used. The host header can be customized for a specific endpoint by setting the // :ref:`hostname ` field. string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. string path = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; // [#not-implemented-hide:] HTTP specific response. Payload receive = 4; // Specifies a list of HTTP headers that should be added to each request that is sent to the // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers // `. repeated HeaderValueOption request_headers_to_add = 6 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request that is sent to the // health checked cluster. 
repeated string request_headers_to_remove = 8 [(validate.rules).repeated = { items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open // semantics of :ref:`Int64Range `. The start and end of each // range are required. Only statuses in the range [100, 600) are allowed. repeated type.v3.Int64Range expected_statuses = 9; // Use specified application protocol for health checks. type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; // An optional service name parameter which is used to validate the identity of // the health checked cluster using a :ref:`StringMatcher // `. See the :ref:`architecture overview // ` for more information. type.matcher.v3.StringMatcher service_name_matcher = 11; } message TcpHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.TcpHealthCheck"; // Empty payloads imply a connect-only health check. Payload send = 1; // When checking the response, “fuzzy” matching is performed such that each // binary block must be found, and in the order specified, but not // necessarily contiguous. repeated Payload receive = 2; } message RedisHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.RedisHealthCheck"; // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance // by setting the specified key to any value and waiting for traffic to drain. string key = 1; } // `grpc.health.v1.Health // `_-based // healthcheck. See `gRPC doc `_ // for details. 
message GrpcHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.GrpcHealthCheck"; // An optional service name parameter which will be sent to gRPC service in // `grpc.health.v1.HealthCheckRequest // `_. // message. See `gRPC health-checking overview // `_ for more information. string service_name = 1; // The value of the :authority header in the gRPC health check request. If // left empty (default value), the name of the cluster this health check is associated // with will be used. The authority header can be customized for a specific endpoint by setting // the :ref:`hostname ` field. string authority = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // Custom health check. message CustomHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.CustomHealthCheck"; reserved 2; reserved "config"; // The registered name of the custom health checker. string name = 1 [(validate.rules).string = {min_len: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. oneof config_type { google.protobuf.Any typed_config = 3; } } // Health checks occur over the transport socket specified for the cluster. This implies that if a // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. // // This allows overriding the cluster TLS settings, just for health check connections. message TlsOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HealthCheck.TlsOptions"; // Specifies the ALPN protocols for health check connections. This is useful if the // corresponding upstream is using ALPN-based :ref:`FilterChainMatch // ` along with different protocols for health checks // versus data connections. 
If empty, no ALPN protocols will be set on health check connections. repeated string alpn_protocols = 1; } reserved 10; // The time to wait for a health check response. If the timeout is reached the // health check attempt will be considered a failure. google.protobuf.Duration timeout = 1 [(validate.rules).duration = { required: true gt {} }]; // The interval between health checks. google.protobuf.Duration interval = 2 [(validate.rules).duration = { required: true gt {} }]; // An optional jitter amount in milliseconds. If specified, Envoy will start health // checking after for a random time in ms between 0 and initial_jitter. This only // applies to the first health check. google.protobuf.Duration initial_jitter = 20; // An optional jitter amount in milliseconds. If specified, during every // interval Envoy will add interval_jitter to the wait time. google.protobuf.Duration interval_jitter = 3; // An optional jitter amount as a percentage of interval_ms. If specified, // during every interval Envoy will add interval_ms * // interval_jitter_percent / 100 to the wait time. // // If interval_jitter_ms and interval_jitter_percent are both set, both of // them will be used to increase the wait time. uint32 interval_jitter_percent = 18; // The number of unhealthy health checks required before a host is marked // unhealthy. Note that for *http* health checking if a host responds with 503 // this threshold is ignored and the host is considered unhealthy immediately. google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; // The number of healthy health checks required before a host is marked // healthy. Note that during startup, only a single successful health check is // required to mark a host healthy. google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Non-serving port for health checking. 
google.protobuf.UInt32Value alt_port = 6; // Reuse health check connection between health checks. Default is true. google.protobuf.BoolValue reuse_connection = 7; oneof health_checker { option (validate.required) = true; // HTTP health check. HttpHealthCheck http_health_check = 8; // TCP health check. TcpHealthCheck tcp_health_check = 9; // gRPC health check. GrpcHealthCheck grpc_health_check = 11; // Custom health check. CustomHealthCheck custom_health_check = 13; } // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to // date, without sending a potentially large amount of active health checking traffic for no // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the // standard health check interval that is defined. Note that this interval takes precedence over // any other. // // The default value for "no traffic interval" is 60 seconds. google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. // // The default value for "unhealthy interval" is the same as "interval". google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. For subsequent health checks // Envoy will shift back to using either "unhealthy interval" if present or the standard health // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". 
google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; // Specifies the path to the :ref:`health check event log `. // If empty, no event log will be written. string event_log_path = 17; // [#not-implemented-hide:] // The gRPC service for the health check event service. // If empty, health check events won't be sent to a remote endpoint. EventServiceConfig event_service = 22; // If set to true, health check failure events will always be logged. If set to false, only the // initial health check failure event will be logged. // The default value is false. bool always_log_health_check_failures = 19; // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's // :ref:`tranport socket matches `. // For example, the following match criteria // // .. code-block:: yaml // // transport_socket_match_criteria: // useMTLS: true // // Will match the following :ref:`cluster socket match ` // // .. code-block:: yaml // // transport_socket_matches: // - name: "useMTLS" // match: // useMTLS: true // transport_socket: // name: envoy.transport_sockets.tls // config: { ... } # tls socket configuration // // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the // :ref:`LbEndpoint.Metadata `. 
// This allows using different transport socket capabilities for health checking versus proxying to the // endpoint. // // If the key/values pairs specified do not match any // :ref:`transport socket matches `, // the cluster's :ref:`transport socket ` // will be used for health check socket configuration. google.protobuf.Struct transport_socket_match_criteria = 23; } ================================================ FILE: api/envoy/config/core/v3/http_uri.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "HttpUriProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Service URI ] // Envoy external URI descriptor message HttpUri { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpUri"; // The HTTP server URI. It should be a full FQDN with protocol, host and path. // // Example: // // .. code-block:: yaml // // uri: https://www.googleapis.com/oauth2/v1/certs // string uri = 1 [(validate.rules).string = {min_len: 1}]; // Specify how `uri` is to be fetched. Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or // inline DNS resolution. See `issue // `_. oneof http_upstream_type { option (validate.required) = true; // A cluster is created in the Envoy "cluster_manager" config // section. This field specifies the cluster name. // // Example: // // .. code-block:: yaml // // cluster: jwks_cluster // string cluster = 2 [(validate.rules).string = {min_len: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. 
google.protobuf.Duration timeout = 3 [(validate.rules).duration = { required: true gte {} }]; } ================================================ FILE: api/envoy/config/core/v3/protocol.proto ================================================ syntax = "proto3"; package envoy.config.core.v3; import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v3"; option java_outer_classname = "ProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Protocol options] // [#not-implemented-hide:] message TcpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.TcpProtocolOptions"; } message UpstreamHttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.UpstreamHttpProtocolOptions"; // Set transport socket `SNI `_ for new // upstream connections based on the downstream HTTP host/authority header, as seen by the // :ref:`router filter `. bool auto_sni = 1; // Automatic validate upstream presented certificate for new upstream connections based on the // downstream HTTP host/authority header, as seen by the // :ref:`router filter `. // This field is intended to set with `auto_sni` field. bool auto_san_validation = 2; } // [#next-free-field: 6] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.HttpProtocolOptions"; // Action to take when Envoy receives client request with header names containing underscore // characters. // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented // as a security measure due to systems that treat '_' and '-' as interchangeable. 
Envoy by default allows client request headers with underscore // characters. enum HeadersWithUnderscoresAction { // Allow headers with underscores. This is the default behavior. ALLOW = 0; // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter // is incremented for each rejected request. REJECT_REQUEST = 1; // Drop the header with name containing underscores. The header is dropped before the filter chain is // invoked and as such filters will not see dropped headers. The // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. DROP_HEADER = 2; } // The idle timeout for connections. The idle timeout is defined as the // period in which there are no active requests. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 // downstream connection a drain sequence will occur prior to closing the connection, see // :ref:`drain_timeout // `. // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. // // .. warning:: // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. google.protobuf.Duration idle_timeout = 1; // The maximum duration of a connection. The duration is defined as a period since a connection // was established. If not set, there is no max duration. When max_connection_duration is reached // the connection will be closed. Drain sequence will occur prior to closing the connection if // if's applicable. See :ref:`drain_timeout // `. // Note: not implemented for upstream connections. google.protobuf.Duration max_connection_duration = 3; // The maximum number of headers. If unconfigured, the default // maximum number of request headers allowed is 100. 
// Requests that exceed this limit will receive
  // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2.
  google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}];

  // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the
  // stream will be reset independent of any other timeouts. If not specified, this value is not
  // set.
  google.protobuf.Duration max_stream_duration = 4;

  // Action to take when a client request with a header name containing underscore characters is
  // received. If this setting is not specified, the value defaults to ALLOW.
  // Note: upstream responses are not affected by this setting.
  HeadersWithUnderscoresAction headers_with_underscores_action = 5;
}

// [#next-free-field: 8]
message Http1ProtocolOptions {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.core.Http1ProtocolOptions";

  message HeaderKeyFormat {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat";

    message ProperCaseWords {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.api.v2.core.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords";
    }

    oneof header_format {
      option (validate.required) = true;

      // Formats the header by proper casing words: the first character and any character following
      // a special character will be capitalized if it's an alpha character. For example,
      // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are".
      // Note that while this results in most headers following conventional casing, certain headers
      // are not covered. For example, the "TE" header will be formatted as "Te".
      ProperCaseWords proper_case_words = 1;
    }
  }

  // Handle HTTP requests with absolute URLs in the requests. These requests
  // are generally sent by clients to forward/explicit proxies. This allows clients to configure
  // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the
  // *http_proxy* environment variable.
  google.protobuf.BoolValue allow_absolute_url = 1;

  // Handle incoming HTTP/1.0 and HTTP 0.9 requests.
  // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1
  // style connect logic, dechunking, and handling lack of client host iff
  // *default_host_for_http_10* is configured.
  bool accept_http_10 = 2;

  // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as
  // Envoy does not otherwise support HTTP/1.0 without a Host header.
  // This is a no-op if *accept_http_10* is not true.
  string default_host_for_http_10 = 3;

  // Describes how the keys for response headers should be formatted. By default, all header keys
  // are lower cased.
  HeaderKeyFormat header_key_format = 4;

  // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers.
  //
  // .. attention::
  //
  //   Note that this only happens when Envoy is chunk encoding which occurs when:
  //   - The request is HTTP/1.1.
  //   - Is neither a HEAD only request nor a HTTP Upgrade.
  //   - Not a response to a HEAD request.
  //   - The content length header is not present.
  bool enable_trailers = 5;

  // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding`
  // headers set. By default such messages are rejected, but if option is enabled - Envoy will
  // remove Content-Length header and process message.
  // See `RFC7230, sec. 3.3.3 <https://tools.ietf.org/html/rfc7230#section-3.3.3>`_ for details.
  //
  // .. attention::
  //   Enabling this option might lead to request smuggling vulnerability, especially if traffic
  //   is proxied via multiple layers of proxies.
  bool allow_chunked_length = 6;

  // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate
  // HTTP/1.1 connections upon receiving an invalid HTTP message. However,
  // when this option is true, then Envoy will leave the HTTP/1.1 connection
  // open where possible.
  // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging
  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`.
  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7;
}

message KeepaliveSettings {
  // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive.
  google.protobuf.Duration interval = 1 [(validate.rules).duration = {
    required: true
    gte {nanos: 1000000}
  }];

  // How long to wait for a response to a keepalive PING. If a response is not received within this
  // time period, the connection will be aborted.
  google.protobuf.Duration timeout = 2 [(validate.rules).duration = {
    required: true
    gte {nanos: 1000000}
  }];

  // A random jitter amount as a percentage of interval that will be added to each interval.
  // A value of zero means there will be no jitter.
  // The default value is 15%.
  type.v3.Percent interval_jitter = 3;
}

// [#next-free-field: 16]
message Http2ProtocolOptions {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.core.Http2ProtocolOptions";

  // Defines a parameter to be sent in the SETTINGS frame.
  // See `RFC7540, sec. 6.5.1 <https://httpwg.org/specs/rfc7540.html#rfc.section.6.5.1>`_ for
  // details.
  message SettingsParameter {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.core.Http2ProtocolOptions.SettingsParameter";

    // The 16 bit parameter identifier.
    google.protobuf.UInt32Value identifier = 1 [
      (validate.rules).uint32 = {lte: 65535 gte: 0},
      (validate.rules).message = {required: true}
    ];

    // The 32 bit parameter value.
    google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}];
  }

  // `Maximum table size <https://httpwg.org/specs/rfc7541.html#rfc.section.4.2>`_
  // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values
  // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header
  // compression.
  google.protobuf.UInt32Value hpack_table_size = 1;

  // `Maximum concurrent streams <https://httpwg.org/specs/rfc7540.html#rfc.section.5.1.2>`_
  // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1)
  // and defaults to 2147483647.
  //
  // For upstream connections, this also limits how many streams Envoy will initiate concurrently
  // on a single connection. If the limit is reached, Envoy may queue requests or establish
  // additional connections (as allowed per circuit breaker limits).
  google.protobuf.UInt32Value max_concurrent_streams = 2
      [(validate.rules).uint32 = {lte: 2147483647 gte: 1}];

  // `Initial stream-level flow-control window
  // <https://httpwg.org/specs/rfc7540.html#rfc.section.6.9.2>`_ size. Valid values range from 65535
  // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456
  // (256 * 1024 * 1024).
  //
  // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default
  // window size now, so it's also the minimum.
  //
  // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the
  // HTTP/2 codec buffers. Once the buffer reaches this limit, watermark callbacks will fire to
  // stop the flow of data to the codec buffers.
  google.protobuf.UInt32Value initial_stream_window_size = 3
      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];

  // Similar to *initial_stream_window_size*, but for connection-level flow-control
  // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*.
  google.protobuf.UInt32Value initial_connection_window_size = 4
      [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}];

  // Allows proxying Websocket and other upgrades over H2 connect.
  bool allow_connect = 5;

  // [#not-implemented-hide:] Hiding until envoy has full metadata support.
  // Still under implementation. DO NOT USE.
  //
  // Allows metadata. See [metadata
  // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more
  // information.
  bool allow_metadata = 6;

  // Limit the number of pending outbound downstream frames of all types (frames that are waiting to
  // be written into the socket). Exceeding this limit triggers flood mitigation and connection is
  // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due
  // to flood mitigation. The default limit is 10000.
  // [#comment:TODO: implement same limits for upstream outbound frames as well.]
  google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}];

  // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM,
  // preventing high memory utilization when receiving continuous stream of these frames. Exceeding
  // this limit triggers flood mitigation and connection is terminated. The
  // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood
  // mitigation. The default limit is 1000.
  // [#comment:TODO: implement same limits for upstream outbound frames as well.]
  google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}];

  // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an
  // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but
  // might be a result of a broken HTTP/2 implementation. The ``http2.inbound_empty_frames_flood``
  // stat tracks the number of connections terminated due to flood mitigation.
  // Setting this to 0 will terminate connection upon receiving first frame with an empty payload
  // and no end stream flag. The default limit is 1.
  // [#comment:TODO: implement same limits for upstream inbound frames as well.]
  google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9;

  // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number
  // of PRIORITY frames received over the lifetime of connection exceeds the value calculated
  // using this formula::
  //
  //     max_inbound_priority_frames_per_stream * (1 + inbound_streams)
  //
  // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks
  // the number of connections terminated due to flood mitigation. The default limit is 100.
  // [#comment:TODO: implement same limits for upstream inbound frames as well.]
  google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10;

  // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number
  // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated
  // using this formula::
  //
  //     1 + 2 * (inbound_streams +
  //              max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames)
  //
  // the connection is terminated. The ``http2.inbound_window_update_frames_flood`` stat tracks
  // the number of connections terminated due to flood mitigation. The default limit is 10.
  // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control,
  // but more complex implementations that try to estimate available bandwidth require at least 2.
  // [#comment:TODO: implement same limits for upstream inbound frames as well.]
  google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11
      [(validate.rules).uint32 = {gte: 1}];

  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then
  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,
  // when this option is enabled, only the offending stream is terminated.
  //
  // This is overridden by HCM :ref:`stream_error_on_invalid_http_messaging
  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`
  // iff present.
  //
  // This is deprecated in favor of :ref:`override_stream_error_on_invalid_http_message
  // <envoy_api_field_config.core.v3.Http2ProtocolOptions.override_stream_error_on_invalid_http_message>`
  //
  // See `RFC7540, sec. 8.1 <https://httpwg.org/specs/rfc7540.html#rfc.section.8.1>`_ for details.
  bool stream_error_on_invalid_http_messaging = 12 [deprecated = true];

  // Allows invalid HTTP messaging and headers. When this option is disabled (default), then
  // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However,
  // when this option is enabled, only the offending stream is terminated.
  //
  // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging
  // <envoy_api_field_extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_error_on_invalid_http_message>`
  //
  // See `RFC7540, sec. 8.1 <https://httpwg.org/specs/rfc7540.html#rfc.section.8.1>`_ for details.
  google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14;

  // [#not-implemented-hide:]
  // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions:
  //
  // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by
  // Envoy.
  //
  // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field
  // 'allow_connect'.
  //
  // Note that custom parameters specified through this field can not also be set in the
  // corresponding named parameters:
  //
  // .. code-block:: text
  //
  //   ID    Field Name
  //   ----------------
  //   0x1   hpack_table_size
  //   0x3   max_concurrent_streams
  //   0x4   initial_stream_window_size
  //
  // Collisions will trigger config validation failure on load/update. Likewise, inconsistencies
  // between custom parameters with the same identifier will trigger a failure.
  //
  // See `IANA HTTP/2 Settings
  // <https://www.iana.org/assignments/http2-parameters/http2-parameters.xhtml#settings>`_ for
  // standardized identifiers.
  repeated SettingsParameter custom_settings_parameters = 13;

  // Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer
  // does not respond within the configured timeout, the connection will be aborted.
  KeepaliveSettings connection_keepalive = 15;
}

// [#not-implemented-hide:]
message GrpcProtocolOptions {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.core.GrpcProtocolOptions";

  Http2ProtocolOptions http2_protocol_options = 1;
}

================================================
FILE: api/envoy/config/core/v3/proxy_protocol.proto
================================================
syntax = "proto3";

package envoy.config.core.v3;

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v3";
option java_outer_classname = "ProxyProtocolProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Proxy Protocol]

message ProxyProtocolConfig {
  enum Version {
    // PROXY protocol version 1. Human readable format.
    V1 = 0;

    // PROXY protocol version 2. Binary format.
    V2 = 1;
  }

  // The PROXY protocol version to use. See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details
  Version version = 1;
}

================================================
FILE: api/envoy/config/core/v3/socket_option.proto
================================================
syntax = "proto3";

package envoy.config.core.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v3";
option java_outer_classname = "SocketOptionProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Socket Option ]

// Generic socket option message. This would be used to set socket options that
// might not exist in upstream kernels or precompiled Envoy binaries.
// [#next-free-field: 7]
message SocketOption {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.core.SocketOption";

  // The point in the socket lifecycle at which the option is applied.
  enum SocketState {
    // Socket options are applied after socket creation but before binding the socket to a port
    STATE_PREBIND = 0;

    // Socket options are applied after binding the socket to a port but before calling listen()
    STATE_BOUND = 1;

    // Socket options are applied after calling listen()
    STATE_LISTENING = 2;
  }

  // An optional name to give this socket option for debugging, etc.
  // Uniqueness is not required and no special meaning is assumed.
  string description = 1;

  // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP
  int64 level = 2;

  // The numeric name as passed to setsockopt
  int64 name = 3;

  oneof value {
    option (validate.required) = true;

    // Because many sockopts take an int value.
    int64 int_value = 4;

    // Otherwise it's a byte buffer.
    bytes buf_value = 5;
  }

  // The state in which the option will be applied. When used in BindConfig
  // STATE_PREBIND is currently the only valid value.
  SocketState state = 6 [(validate.rules).enum = {defined_only: true}];
}

================================================
FILE: api/envoy/config/core/v3/substitution_format_string.proto
================================================
syntax = "proto3";

package envoy.config.core.v3;

import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v3";
option java_outer_classname = "SubstitutionFormatStringProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Substitution format string]

// Configuration to use multiple :ref:`command operators <config_access_log_command_operators>`
// to generate a new string in either plain text or JSON format.
message SubstitutionFormatString {
  oneof format {
    option (validate.required) = true;

    // Specify a format with command operators to form a text string.
    // Its details are described in :ref:`format string<config_access_log_format_strings>`.
    //
    // For example, setting ``text_format`` like below,
    //
    // .. validated-code-block:: yaml
    //   :type-name: envoy.config.core.v3.SubstitutionFormatString
    //
    //   text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
    //
    // generates plain text similar to:
    //
    // .. code-block:: text
    //
    //   upstream connect error:503:path=/foo
    //
    string text_format = 1 [(validate.rules).string = {min_len: 1}];

    // Specify a format with command operators to form a JSON string.
    // Its details are described in :ref:`format dictionary<config_access_log_format_dictionaries>`.
    // Values are rendered as strings, numbers, or boolean values as appropriate.
    // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or
    // DYNAMIC_METADATA). See the documentation for a specific command operator for details.
    //
    // .. validated-code-block:: yaml
    //   :type-name: envoy.config.core.v3.SubstitutionFormatString
    //
    //   json_format:
    //     status: "%RESPONSE_CODE%"
    //     message: "%LOCAL_REPLY_BODY%"
    //
    // The following JSON object would be created:
    //
    // .. code-block:: json
    //
    //   {
    //     "status": 500,
    //     "message": "My error message"
    //   }
    //
    google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}];
  }

  // If set to true, when command operators are evaluated to null,
  //
  // * for ``text_format``, the output of the empty operator is changed from ``-`` to an
  //   empty string, so that empty values are omitted entirely.
  // * for ``json_format`` the keys with null values are omitted in the output structure.
  bool omit_empty_values = 3;

  // Specify a *content_type* field.
  // If this field is not set then ``text/plain`` is used for *text_format* and
  // ``application/json`` is used for *json_format*.
  //
  // .. validated-code-block:: yaml
  //   :type-name: envoy.config.core.v3.SubstitutionFormatString
  //
  //   content_type: "text/html; charset=UTF-8"
  //
  string content_type = 4;
}

================================================
FILE: api/envoy/config/core/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/config/core/v3:pkg",
        "//envoy/type/matcher/v4alpha:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
        "@com_github_cncf_udpa//udpa/core/v1:pkg",
    ],
)

================================================
FILE: api/envoy/config/core/v4alpha/address.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "envoy/config/core/v4alpha/socket_option.proto";

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "AddressProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Network addresses]

message Pipe {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Pipe";

  // Unix Domain Socket path. On Linux, paths starting with '@' will use the
  // abstract namespace. The starting '@' is replaced by a null byte by Envoy.
  // Paths starting with '@' will result in an error in environments other than
  // Linux.
  string path = 1 [(validate.rules).string = {min_len: 1}];

  // The mode for the Pipe. Not applicable for abstract sockets.
  uint32 mode = 2 [(validate.rules).uint32 = {lte: 511}];
}

// [#not-implemented-hide:] The address represents an envoy internal listener.
// TODO(lambdai): Make this address available for listener and endpoint.
// TODO(asraa): When address available, remove workaround from test/server/server_fuzz_test.cc:30.
message EnvoyInternalAddress {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.EnvoyInternalAddress";

  oneof address_name_specifier {
    option (validate.required) = true;

    // [#not-implemented-hide:] The :ref:`listener name
    // <envoy_api_field_config.listener.v4alpha.Listener.name>` of the destination internal
    // listener.
    string server_listener_name = 1;
  }
}

// [#next-free-field: 7]
message SocketAddress {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.SocketAddress";

  enum Protocol {
    TCP = 0;
    UDP = 1;
  }

  Protocol protocol = 1 [(validate.rules).enum = {defined_only: true}];

  // The address for this socket. :ref:`Listeners <arch_overview_listeners>` will bind
  // to the address. An empty address is not allowed. Specify ``0.0.0.0`` or ``::``
  // to bind to any address. [#comment:TODO(zuercher) reinstate when implemented:
  // It is possible to distinguish a Listener address via the prefix/suffix matching
  // in :ref:`FilterChainMatch <envoy_api_msg_config.listener.v4alpha.FilterChainMatch>`.] When used
  // within an upstream :ref:`BindConfig <envoy_api_msg_config.core.v4alpha.BindConfig>`, the
  // address controls the source address of outbound connections. For :ref:`clusters
  // <envoy_api_msg_config.cluster.v4alpha.Cluster>`, the cluster type determines whether the
  // address must be an IP (*STATIC* or *EDS* clusters) or a hostname resolved by DNS
  // (*STRICT_DNS* or *LOGICAL_DNS* clusters). Address resolution can be customized
  // via :ref:`resolver_name <envoy_api_field_config.core.v4alpha.SocketAddress.resolver_name>`.
  string address = 2 [(validate.rules).string = {min_len: 1}];

  oneof port_specifier {
    option (validate.required) = true;

    uint32 port_value = 3 [(validate.rules).uint32 = {lte: 65535}];

    // This is only valid if :ref:`resolver_name
    // <envoy_api_field_config.core.v4alpha.SocketAddress.resolver_name>` is specified below and the
    // named resolver is capable of named port resolution.
    string named_port = 4;
  }

  // The name of the custom resolver. This must have been registered with Envoy. If
  // this is empty, a context dependent default applies. If the address is a concrete
  // IP address, no resolution will occur. If address is a hostname this
  // should be set for resolution other than DNS. Specifying a custom resolver with
  // *STRICT_DNS* or *LOGICAL_DNS* will generate an error at runtime.
  string resolver_name = 5;

  // When binding to an IPv6 address above, this enables `IPv4 compatibility
  // <https://tools.ietf.org/html/rfc3493#page-11>`_. Binding to ``::`` will
  // allow both IPv4 and IPv6 connections, with peer IPv4 addresses mapped into
  // IPv6 space as ``::FFFF:<IPv4-address>``.
  bool ipv4_compat = 6;
}

message TcpKeepalive {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.TcpKeepalive";

  // Maximum number of keepalive probes to send without response before deciding
  // the connection is dead. Default is to use the OS level configuration (unless
  // overridden, Linux defaults to 9.)
  google.protobuf.UInt32Value keepalive_probes = 1;

  // The number of seconds a connection needs to be idle before keep-alive probes
  // start being sent. Default is to use the OS level configuration (unless
  // overridden, Linux defaults to 7200s (i.e., 2 hours.)
  google.protobuf.UInt32Value keepalive_time = 2;

  // The number of seconds between keep-alive probes. Default is to use the OS
  // level configuration (unless overridden, Linux defaults to 75s.)
  google.protobuf.UInt32Value keepalive_interval = 3;
}

message BindConfig {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BindConfig";

  // The address to bind to when creating a socket.
  SocketAddress source_address = 1 [(validate.rules).message = {required: true}];

  // Whether to set the *IP_FREEBIND* option when creating the socket. When this
  // flag is set to true, allows the :ref:`source_address
  // <envoy_api_field_config.core.v4alpha.BindConfig.source_address>` to be an IP address
  // that is not configured on the system running Envoy. When this flag is set
  // to false, the option *IP_FREEBIND* is disabled on the socket. When this
  // flag is not set (default), the socket is not modified, i.e. the option is
  // neither enabled nor disabled.
  google.protobuf.BoolValue freebind = 2;

  // Additional socket options that may not be present in Envoy source code or
  // precompiled binaries.
  repeated SocketOption socket_options = 3;
}

// Addresses specify either a logical or physical address and port, which are
// used to tell Envoy where to bind/listen, connect to upstream and find
// management servers.
message Address {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Address";

  oneof address {
    option (validate.required) = true;

    SocketAddress socket_address = 1;

    Pipe pipe = 2;

    // [#not-implemented-hide:]
    EnvoyInternalAddress envoy_internal_address = 3;
  }
}

// CidrRange specifies an IP Address and a prefix length to construct
// the subnet mask for a `CIDR <https://tools.ietf.org/html/rfc4632>`_ range.
message CidrRange {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.CidrRange";

  // IPv4 or IPv6 address, e.g. ``192.0.0.0`` or ``2001:db8::``.
  string address_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // Length of prefix, e.g. 0, 32.
  google.protobuf.UInt32Value prefix_len = 2 [(validate.rules).uint32 = {lte: 128}];
}

================================================
FILE: api/envoy/config/core/v4alpha/backoff.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "BackoffProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Backoff Strategy]

// Configuration defining a jittered exponential back off strategy.
message BackoffStrategy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.BackoffStrategy";

  // The base interval to be used for the next back off computation. It should
  // be greater than zero and less than or equal to :ref:`max_interval
  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.max_interval>`.
  google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {
    required: true
    gte {nanos: 1000000}
  }];

  // Specifies the maximum interval between retries. This parameter is optional,
  // but must be greater than or equal to the :ref:`base_interval
  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.base_interval>` if set. The default
  // is 10 times the :ref:`base_interval
  // <envoy_api_field_config.core.v4alpha.BackoffStrategy.base_interval>`.
  google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];
}

================================================
FILE: api/envoy/config/core/v4alpha/base.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "envoy/config/core/v4alpha/backoff.proto";
import "envoy/config/core/v4alpha/http_uri.proto";
import "envoy/type/v3/percent.proto";
import "envoy/type/v3/semantic_version.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "BaseProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Common types]

// Envoy supports :ref:`upstream priority routing
// <arch_overview_http_routing_priority>` both at the route and the virtual
// cluster level. The current priority implementation uses different connection
// pool and circuit breaking settings for each priority level. This means that
// even for HTTP/2 requests, two physical connections will be used to an
// upstream host. In the future Envoy will likely support true HTTP/2 priority
// over a single upstream connection.
enum RoutingPriority {
  DEFAULT = 0;
  HIGH = 1;
}

// HTTP request method.
enum RequestMethod {
  METHOD_UNSPECIFIED = 0;
  GET = 1;
  HEAD = 2;
  POST = 3;
  PUT = 4;
  DELETE = 5;
  CONNECT = 6;
  OPTIONS = 7;
  TRACE = 8;
  PATCH = 9;
}

// Identifies the direction of the traffic relative to the local Envoy.
enum TrafficDirection {
  // Default option is unspecified.
  UNSPECIFIED = 0;

  // The transport is used for incoming traffic.
  INBOUND = 1;

  // The transport is used for outgoing traffic.
  OUTBOUND = 2;
}

// Identifies location of where either Envoy runs or where upstream hosts run.
message Locality {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Locality";

  // Region this :ref:`zone <envoy_api_field_config.core.v4alpha.Locality.zone>` belongs to.
  string region = 1;

  // Defines the local service zone where Envoy is running. Though optional, it
  // should be set if discovery service routing is used and the discovery
  // service exposes :ref:`zone data
  // <envoy_api_field_config.endpoint.v4alpha.LocalityLbEndpoints.locality>`,
  // either in this message or via :option:`--service-zone`. The meaning of zone
  // is context dependent, e.g. `Availability Zone (AZ)
  // <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html>`_
  // on AWS, `Zone <https://cloud.google.com/compute/docs/regions-zones/>`_ on
  // GCP, etc.
  string zone = 2;

  // When used for locality of upstream hosts, this field further splits zone
  // into smaller chunks of sub-zones so they can be load balanced
  // independently.
  string sub_zone = 3;
}

// BuildVersion combines SemVer version of extension with free-form build information
// (i.e. 'alpha', 'private-build') as a set of strings.
message BuildVersion {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.BuildVersion";

  // SemVer version of extension.
  type.v3.SemanticVersion version = 1;

  // Free-form build information.
  // Envoy defines several well known keys in the source/common/version/version.h file
  google.protobuf.Struct metadata = 2;
}

// Version and identification for an Envoy extension.
// [#next-free-field: 6] message Extension { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Extension"; // This is the name of the Envoy filter as specified in the Envoy // configuration, e.g. envoy.filters.http.router, com.acme.widget. string name = 1; // Category of the extension. // Extension category names use reverse DNS notation. For instance "envoy.filters.listener" // for Envoy's built-in listener filters or "com.acme.filters.http" for HTTP filters from // acme.com vendor. // [#comment:TODO(yanavlasov): Link to the doc with existing envoy category names.] string category = 2; // [#not-implemented-hide:] Type descriptor of extension configuration proto. // [#comment:TODO(yanavlasov): Link to the doc with existing configuration protos.] // [#comment:TODO(yanavlasov): Add tests when PR #9391 lands.] string type_descriptor = 3; // The version is a property of the extension and maintained independently // of other extensions and the Envoy API. // This field is not set when extension did not provide version information. BuildVersion version = 4; // Indicates that the extension is present but was disabled via dynamic configuration. bool disabled = 5; } // Identifies a specific Envoy instance. The node identifier is presented to the // management server, which may use this identifier to distinguish per Envoy // configuration for serving. // [#next-free-field: 12] message Node { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Node"; reserved 5, 11; reserved "build_version", "listening_addresses"; // An opaque node identifier for the Envoy node. This also provides the local // service node name. It should be set if any of the following features are // used: :ref:`statsd `, :ref:`CDS // `, and :ref:`HTTP tracing // `, either in this message or via // :option:`--service-node`. string id = 1; // Defines the local service cluster name where Envoy is running. 
Though // optional, it should be set if any of the following features are used: // :ref:`statsd `, :ref:`health check cluster // verification // `, // :ref:`runtime override directory `, // :ref:`user agent addition // `, // :ref:`HTTP global rate limiting `, // :ref:`CDS `, and :ref:`HTTP tracing // `, either in this message or via // :option:`--service-cluster`. string cluster = 2; // Opaque metadata extending the node identifier. Envoy will pass this // directly to the management server. google.protobuf.Struct metadata = 3; // Locality specifying where the Envoy instance is running. Locality locality = 4; // Free-form string that identifies the entity requesting config. // E.g. "envoy" or "grpc" string user_agent_name = 6; oneof user_agent_version_type { // Free-form string that identifies the version of the entity requesting config. // E.g. "1.12.2" or "abcd1234", or "SpecialEnvoyBuild" string user_agent_version = 7; // Structured version of the entity requesting config. BuildVersion user_agent_build_version = 8; } // List of extensions and their versions supported by the node. repeated Extension extensions = 9; // Client feature support list. These are well known features described // in the Envoy API repository for a given major version of an API. Client features // use reverse DNS naming scheme, for example `com.acme.feature`. // See :ref:`the list of features ` that xDS client may // support. repeated string client_features = 10; } // Metadata provides additional inputs to filters based on matched listeners, // filter chains, routes and endpoints. It is structured as a map, usually from // filter name (in reverse DNS format) to metadata specific to the filter. Metadata // key-values for a filter are merged as connection and request handling occurs, // with later values for the same key overriding earlier values. 
//
// An example use of metadata is providing additional values to
// http_connection_manager in the envoy.http_connection_manager.access_log
// namespace.
//
// Another example use of metadata is to per service config info in cluster metadata, which may get
// consumed by multiple filters.
//
// For load balancing, Metadata provides a means to subset cluster endpoints.
// Endpoints have a Metadata object associated and routes contain a Metadata
// object to match against. There are some well defined metadata used today for
// this purpose:
//
// * ``{"envoy.lb": {"canary": <bool>}}`` This indicates the canary status of an
//   endpoint and is also used during header processing
//   (x-envoy-upstream-canary) and for stats purposes.
// [#next-major-version: move to type/metadata/v2]
message Metadata {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Metadata";

  // Key is the reverse DNS filter name, e.g. com.acme.widget. The envoy.*
  // namespace is reserved for Envoy's built-in filters.
  // NOTE(review): the map's generic arguments were lost in extraction; restored
  // to <string, google.protobuf.Struct> to match envoy.config.core.v3.Metadata.
  map<string, google.protobuf.Struct> filter_metadata = 1;
}

// Runtime derived uint32 with a default when not specified.
message RuntimeUInt32 {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.RuntimeUInt32";

  // Default value if runtime value is not available.
  uint32 default_value = 2;

  // Runtime key to get value for comparison. This value is used if defined.
  string runtime_key = 3 [(validate.rules).string = {min_len: 1}];
}

// Runtime derived percentage with a default when not specified.
message RuntimePercent {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.RuntimePercent";

  // Default value if runtime value is not available.
  type.v3.Percent default_value = 1;

  // Runtime key to get value for comparison. This value is used if defined.
  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}

// Runtime derived double with a default when not specified.
message RuntimeDouble {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.RuntimeDouble";

  // Default value if runtime value is not available.
  double default_value = 1;

  // Runtime key to get value for comparison. This value is used if defined.
  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}

// Runtime derived bool with a default when not specified.
message RuntimeFeatureFlag {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.RuntimeFeatureFlag";

  // Default value if runtime value is not available.
  google.protobuf.BoolValue default_value = 1 [(validate.rules).message = {required: true}];

  // Runtime key to get value for comparison. This value is used if defined. The boolean value must
  // be represented via its
  // `canonical JSON encoding `_.
  string runtime_key = 2 [(validate.rules).string = {min_len: 1}];
}

// Header name/value pair.
message HeaderValue {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.HeaderValue";

  // Header name.
  string key = 1
      [(validate.rules).string =
           {min_len: 1 max_bytes: 16384 well_known_regex: HTTP_HEADER_NAME strict: false}];

  // Header value.
  //
  // The same :ref:`format specifier ` as used for
  // :ref:`HTTP access logging ` applies here, however
  // unknown header values are replaced with the empty string instead of `-`.
  string value = 2 [
    (validate.rules).string = {max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}
  ];
}

// Header name/value pair plus option to control append behavior.
message HeaderValueOption {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.HeaderValueOption";

  // Header name/value pair that this option applies to.
  HeaderValue header = 1 [(validate.rules).message = {required: true}];

  // Should the value be appended? If true (default), the value is appended to
  // existing values. Otherwise it replaces any existing values.
  google.protobuf.BoolValue append = 2;
}

// Wrapper for a set of headers.
message HeaderMap {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HeaderMap";

  repeated HeaderValue headers = 1;
}

// Data source consisting of either a file or an inline value.
message DataSource {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.DataSource";

  oneof specifier {
    option (validate.required) = true;

    // Local filesystem data source.
    string filename = 1 [(validate.rules).string = {min_len: 1}];

    // Bytes inlined in the configuration.
    bytes inline_bytes = 2 [(validate.rules).bytes = {min_len: 1}];

    // String inlined in the configuration.
    string inline_string = 3 [(validate.rules).string = {min_len: 1}];
  }
}

// The message specifies the retry policy of remote data source when fetching fails.
message RetryPolicy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.RetryPolicy";

  // Specifies parameters that control :ref:`retry backoff strategy `.
  // This parameter is optional, in which case the default base interval is 1000 milliseconds. The
  // default maximum interval is 10 times the base interval.
  BackoffStrategy retry_back_off = 1;

  // Specifies the allowed number of retries. This parameter is optional and
  // defaults to 1.
  google.protobuf.UInt32Value max_retries = 2;
}

// The message specifies how to fetch data from remote and how to verify it.
message RemoteDataSource {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.RemoteDataSource";

  // The HTTP URI to fetch the remote data.
  HttpUri http_uri = 1 [(validate.rules).message = {required: true}];

  // SHA256 string for verifying data.
  string sha256 = 2 [(validate.rules).string = {min_len: 1}];

  // Retry policy for fetching remote data.
  RetryPolicy retry_policy = 3;
}

// Async data source which supports async data fetch.
message AsyncDataSource {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.AsyncDataSource";

  oneof specifier {
    option (validate.required) = true;

    // Local async data source.
    DataSource local = 1;

    // Remote async data source.
    RemoteDataSource remote = 2;
  }
}

// Configuration for transport socket in :ref:`listeners ` and
// :ref:`clusters `. If the configuration is
// empty, a default transport socket implementation and configuration will be
// chosen based on the platform and existence of tls_context.
message TransportSocket {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.TransportSocket";

  // Field 2 ("config") carried the untyped Struct config in earlier API
  // versions and must not be reused.
  reserved 2;

  reserved "config";

  // The name of the transport socket to instantiate. The name must match a supported transport
  // socket implementation.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // Implementation specific configuration which depends on the implementation being instantiated.
  // See the supported transport socket implementations for further documentation.
  oneof config_type {
    google.protobuf.Any typed_config = 3;
  }
}

// Runtime derived FractionalPercent with defaults for when the numerator or denominator is not
// specified via a runtime key.
//
// .. note::
//
//   Parsing of the runtime key's data is implemented such that it may be represented as a
//   :ref:`FractionalPercent ` proto represented as JSON/YAML
//   and may also be represented as an integer with the assumption that the value is an integral
//   percentage out of 100. For instance, a runtime key lookup returning the value "42" would parse
//   as a `FractionalPercent` whose numerator is 42 and denominator is HUNDRED.
message RuntimeFractionalPercent {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.RuntimeFractionalPercent";

  // Default value if the runtime value's for the numerator/denominator keys are not available.
  type.v3.FractionalPercent default_value = 1 [(validate.rules).message = {required: true}];

  // Runtime key for a YAML representation of a FractionalPercent.
  string runtime_key = 2;
}

// Identifies a specific ControlPlane instance that Envoy is connected to.
message ControlPlane {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.ControlPlane";

  // An opaque control plane identifier that uniquely identifies an instance
  // of control plane. This can be used to identify which control plane instance,
  // the Envoy is connected to.
  string identifier = 1;
}

================================================
FILE: api/envoy/config/core/v4alpha/config_source.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "envoy/config/core/v4alpha/grpc_service.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/core/v1/authority.proto";

import "envoy/annotations/deprecation.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "ConfigSourceProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Configuration sources]

// xDS API and non-xDS services version. This is used to describe both resource and transport
// protocol versions (in distinct configuration fields).
enum ApiVersion {
  // When not specified, we assume v2, to ease migration to Envoy's stable API
  // versioning. If a client does not support v2 (e.g. due to deprecation), this
  // is an invalid value.
  AUTO = 0;

  // Use xDS v2 API.
  V2 = 1;

  // Use xDS v3 API.
  V3 = 2;
}

// API configuration source. This identifies the API type and cluster that Envoy
// will use to fetch an xDS API.
// [#next-free-field: 9]
message ApiConfigSource {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.ApiConfigSource";

  // APIs may be fetched via either REST or gRPC.
  enum ApiType {
    // Ideally this would be 'reserved 0' but one can't reserve the default
    // value. Instead we throw an exception if this is ever used.
    DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0
        [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true];

    // REST-JSON v2 API. The `canonical JSON encoding
    // `_ for
    // the v2 protos is used.
    REST = 1;

    // SotW gRPC service.
    GRPC = 2;

    // Using the delta xDS gRPC service, i.e. DeltaDiscovery{Request,Response}
    // rather than Discovery{Request,Response}. Rather than sending Envoy the entire state
    // with every update, the xDS server only sends what has changed since the last update.
    DELTA_GRPC = 3;

    // SotW xDS gRPC with ADS. All resources which resolve to this configuration source will be
    // multiplexed on a single connection to an ADS endpoint.
    // [#not-implemented-hide:]
    AGGREGATED_GRPC = 5;

    // Delta xDS gRPC with ADS. All resources which resolve to this configuration source will be
    // multiplexed on a single connection to an ADS endpoint.
    // [#not-implemented-hide:]
    AGGREGATED_DELTA_GRPC = 6;
  }

  // API type (gRPC, REST, delta gRPC)
  ApiType api_type = 1 [(validate.rules).enum = {defined_only: true}];

  // API version for xDS transport protocol. This describes the xDS gRPC/REST
  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.
  ApiVersion transport_api_version = 8 [(validate.rules).enum = {defined_only: true}];

  // Cluster names should be used only with REST. If > 1
  // cluster is defined, clusters will be cycled through if any kind of failure
  // occurs.
  //
  // .. note::
  //
  //  The cluster with name ``cluster_name`` must be statically defined and its
  //  type must not be ``EDS``.
  repeated string cluster_names = 2;

  // Multiple gRPC services can be provided for GRPC. If > 1 cluster is defined,
  // services will be cycled through if any kind of failure occurs.
  repeated GrpcService grpc_services = 4;

  // For REST APIs, the delay between successive polls.
  google.protobuf.Duration refresh_delay = 3;

  // For REST APIs, the request timeout. If not set, a default value of 1s will be used.
  google.protobuf.Duration request_timeout = 5 [(validate.rules).duration = {gt {}}];

  // For GRPC APIs, the rate limit settings. If present, discovery requests made by Envoy will be
  // rate limited.
  RateLimitSettings rate_limit_settings = 6;

  // Skip the node identifier in subsequent discovery requests for streaming gRPC config types.
  bool set_node_on_first_message_only = 7;
}

// Aggregated Discovery Service (ADS) options. This is currently empty, but when
// set in :ref:`ConfigSource ` can be used to
// specify that ADS is to be used.
message AggregatedConfigSource {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.AggregatedConfigSource";
}

// [#not-implemented-hide:]
// Self-referencing config source options. This is currently empty, but when
// set in :ref:`ConfigSource ` can be used to
// specify that other data can be obtained from the same server.
message SelfConfigSource {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.SelfConfigSource";

  // API version for xDS transport protocol. This describes the xDS gRPC/REST
  // endpoint and version of [Delta]DiscoveryRequest/Response used on the wire.
  ApiVersion transport_api_version = 1 [(validate.rules).enum = {defined_only: true}];
}

// Rate Limit settings to be applied for discovery requests made by Envoy.
message RateLimitSettings {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.RateLimitSettings";

  // Maximum number of tokens to be used for rate limiting discovery request calls. If not set, a
  // default value of 100 will be used.
  google.protobuf.UInt32Value max_tokens = 1;

  // Rate at which tokens will be filled per second. If not set, a default fill rate of 10 tokens
  // per second will be used.
  google.protobuf.DoubleValue fill_rate = 2 [(validate.rules).double = {gt: 0.0}];
}

// Configuration for :ref:`listeners `, :ref:`clusters
// `, :ref:`routes
// `, :ref:`endpoints
// ` etc. may either be sourced from the
// filesystem or from an xDS API source. Filesystem configs are watched with
// inotify for updates.
// [#next-free-field: 8]
message ConfigSource {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.ConfigSource";

  // Authorities that this config source may be used for. An authority specified
  // in a *udpa.core.v1.ResourceLocator* is resolved to a *ConfigSource* prior
  // to configuration fetch. This field provides the association between
  // authority name and configuration source.
  // [#not-implemented-hide:]
  repeated udpa.core.v1.Authority authorities = 7;

  oneof config_source_specifier {
    option (validate.required) = true;

    // Path on the filesystem to source and watch for configuration updates.
    // When sourcing configuration for :ref:`secret `,
    // the certificate and key files are also watched for updates.
    //
    // .. note::
    //
    //  The path to the source must exist at config load time.
    //
    // .. note::
    //
    //   Envoy will only watch the file path for *moves.* This is because in general only moves
    //   are atomic. The same method of swapping files as is demonstrated in the
    //   :ref:`runtime documentation ` can be used here also.
    string path = 1;

    // API configuration source.
    ApiConfigSource api_config_source = 2;

    // When set, ADS will be used to fetch resources. The ADS API configuration
    // source in the bootstrap configuration is used.
    AggregatedConfigSource ads = 3;

    // [#not-implemented-hide:]
    // When set, the client will access the resources from the same server it got the
    // ConfigSource from, although not necessarily from the same stream. This is similar to the
    // :ref:`ads` field, except that the client may use a
    // different stream to the same server. As a result, this field can be used for things
    // like LRS that cannot be sent on an ADS stream. It can also be used to link from (e.g.)
    // LDS to RDS on the same server without requiring the management server to know its name
    // or required credentials.
    // [#next-major-version: In xDS v3, consider replacing the ads field with this one, since
    // this field can implicitly mean to use the same stream in the case where the ConfigSource
    // is provided via ADS and the specified data can also be obtained via ADS.]
    SelfConfigSource self = 5;
  }

  // When this timeout is specified, Envoy will wait no longer than the specified time for first
  // config response on this xDS subscription during the :ref:`initialization process
  // `. After reaching the timeout, Envoy will move to the next
  // initialization phase, even if the first config is not delivered yet. The timer is activated
  // when the xDS API subscription starts, and is disarmed on first config update or on error. 0
  // means no timeout - Envoy will wait indefinitely for the first xDS config (unless another
  // timeout applies). The default is 15s.
  google.protobuf.Duration initial_fetch_timeout = 4;

  // API version for xDS resources. This implies the type URLs that the client
  // will request for resources and the resource type that the client will in
  // turn expect to be delivered.
  ApiVersion resource_api_version = 6 [(validate.rules).enum = {defined_only: true}];
}

================================================
FILE: api/envoy/config/core/v4alpha/event_service_config.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "envoy/config/core/v4alpha/grpc_service.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "EventServiceConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#not-implemented-hide:]
// Configuration of the event reporting service endpoint.
message EventServiceConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.EventServiceConfig";

  oneof config_source_specifier {
    option (validate.required) = true;

    // Specifies the gRPC service that hosts the event reporting service.
    GrpcService grpc_service = 1;
  }
}

================================================
FILE: api/envoy/config/core/v4alpha/extension.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "envoy/config/core/v4alpha/config_source.proto";

import "google/protobuf/any.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "ExtensionProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Extension configuration]

// Message type for extension configuration.
// [#next-major-version: revisit all existing typed_config that doesn't use this wrapper.].
message TypedExtensionConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.TypedExtensionConfig";

  // The name of an extension. This is not used to select the extension, instead
  // it serves the role of an opaque identifier.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // The typed config for the extension. The type URL will be used to identify
  // the extension. In the case that the type URL is *udpa.type.v1.TypedStruct*,
  // the inner type URL of *TypedStruct* will be utilized. See the
  // :ref:`extension configuration overview
  // ` for further details.
  google.protobuf.Any typed_config = 2 [(validate.rules).any = {required: true}];
}

// Configuration source specifier for a late-bound extension configuration. The
// parent resource is warmed until all the initial extension configurations are
// received, unless the flag to apply the default configuration is set.
// Subsequent extension updates are atomic on a per-worker basis. Once an
// extension configuration is applied to a request or a connection, it remains
// constant for the duration of processing. If the initial delivery of the
// extension configuration fails, due to a timeout for example, the optional
// default configuration is applied. Without a default configuration, the
// extension is disabled, until an extension configuration is received. The
// behavior of a disabled extension depends on the context. For example, a
// filter chain with a disabled extension filter rejects all incoming streams.
message ExtensionConfigSource {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.ExtensionConfigSource";

  // The xDS configuration source for the extension configuration.
  // NOTE(review): the original annotated this message-typed field with
  // *(validate.rules).any*; PGV "any" rules only apply to google.protobuf.Any
  // fields, so the equivalent "message" rule is used instead.
  ConfigSource config_source = 1 [(validate.rules).message = {required: true}];

  // Optional default configuration to use as the initial configuration if
  // there is a failure to receive the initial extension configuration or if
  // `apply_default_config_without_warming` flag is set.
  google.protobuf.Any default_config = 2;

  // Use the default config as the initial configuration without warming and
  // waiting for the first discovery response. Requires the default configuration
  // to be supplied.
  bool apply_default_config_without_warming = 3;

  // A set of permitted extension type URLs. Extension configuration updates are rejected
  // if they do not match any type URL in the set.
  repeated string type_urls = 4 [(validate.rules).repeated = {min_items: 1}];
}

================================================
FILE: api/envoy/config/core/v4alpha/grpc_method_list.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "GrpcMethodListProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: gRPC method list]

// A list of gRPC methods which can be used as an allowlist, for example.
message GrpcMethodList {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.GrpcMethodList";

  message Service {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.core.v3.GrpcMethodList.Service";

    // The name of the gRPC service.
    string name = 1 [(validate.rules).string = {min_len: 1}];

    // The names of the gRPC methods in this service.
    repeated string method_names = 2 [(validate.rules).repeated = {min_items: 1}];
  }

  repeated Service services = 1;
}

================================================
FILE: api/envoy/config/core/v4alpha/grpc_service.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "envoy/config/core/v4alpha/base.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/sensitive.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "GrpcServiceProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: gRPC services]

// gRPC service configuration. This is used by :ref:`ApiConfigSource
// ` and filter configurations.
// [#next-free-field: 6]
message GrpcService {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.GrpcService";

  message EnvoyGrpc {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.core.v3.GrpcService.EnvoyGrpc";

    // The name of the upstream gRPC cluster. SSL credentials will be supplied
    // in the :ref:`Cluster ` :ref:`transport_socket
    // `.
    string cluster_name = 1 [(validate.rules).string = {min_len: 1}];

    // The `:authority` header in the grpc request. If this field is not set, the authority header value will be `cluster_name`.
    // Note that this authority does not override the SNI. The SNI is provided by the transport socket of the cluster.
    string authority = 2
        [(validate.rules).string =
             {min_len: 0 max_bytes: 16384 well_known_regex: HTTP_HEADER_VALUE strict: false}];
  }

  // [#next-free-field: 9]
  message GoogleGrpc {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.core.v3.GrpcService.GoogleGrpc";

    // See https://grpc.io/grpc/cpp/structgrpc_1_1_ssl_credentials_options.html.
    message SslCredentials {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.core.v3.GrpcService.GoogleGrpc.SslCredentials";

      // PEM encoded server root certificates.
      DataSource root_certs = 1;

      // PEM encoded client private key.
      DataSource private_key = 2 [(udpa.annotations.sensitive) = true];

      // PEM encoded client certificate chain.
      DataSource cert_chain = 3;
    }

    // Local channel credentials. Only UDS is supported for now.
    // See https://github.com/grpc/grpc/pull/15909.
    message GoogleLocalCredentials {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.core.v3.GrpcService.GoogleGrpc.GoogleLocalCredentials";
    }

    // See https://grpc.io/docs/guides/auth.html#credential-types to understand Channel and Call
    // credential types.
    message ChannelCredentials {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelCredentials";

      oneof credential_specifier {
        option (validate.required) = true;

        SslCredentials ssl_credentials = 1;

        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61
        google.protobuf.Empty google_default = 2;

        GoogleLocalCredentials local_credentials = 3;
      }
    }

    // [#next-free-field: 8]
    message CallCredentials {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials";

      message ServiceAccountJWTAccessCredentials {
        option (udpa.annotations.versioning).previous_message_type =
            "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials."
            "ServiceAccountJWTAccessCredentials";

        string json_key = 1;

        uint64 token_lifetime_seconds = 2;
      }

      message GoogleIAMCredentials {
        option (udpa.annotations.versioning).previous_message_type =
            "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.GoogleIAMCredentials";

        string authorization_token = 1;

        string authority_selector = 2;
      }

      message MetadataCredentialsFromPlugin {
        option (udpa.annotations.versioning).previous_message_type =
            "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials."
            "MetadataCredentialsFromPlugin";

        // Field 2 ("config") carried the untyped Struct config in earlier API
        // versions and must not be reused.
        reserved 2;

        reserved "config";

        string name = 1;

        oneof config_type {
          google.protobuf.Any typed_config = 3;
        }
      }

      // Security token service configuration that allows Google gRPC to
      // fetch security token from an OAuth 2.0 authorization server.
      // See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16 and
      // https://github.com/grpc/grpc/pull/19587.
      // [#next-free-field: 10]
      message StsService {
        option (udpa.annotations.versioning).previous_message_type =
            "envoy.config.core.v3.GrpcService.GoogleGrpc.CallCredentials.StsService";

        // URI of the token exchange service that handles token exchange requests.
        // [#comment:TODO(asraa): Add URI validation when implemented. Tracked by
        // https://github.com/envoyproxy/protoc-gen-validate/issues/303]
        string token_exchange_service_uri = 1;

        // Location of the target service or resource where the client
        // intends to use the requested security token.
        string resource = 2;

        // Logical name of the target service where the client intends to
        // use the requested security token.
        string audience = 3;

        // The desired scope of the requested security token in the
        // context of the service or resource where the token will be used.
        string scope = 4;

        // Type of the requested security token.
        string requested_token_type = 5;

        // The path of subject token, a security token that represents the
        // identity of the party on behalf of whom the request is being made.
        string subject_token_path = 6 [(validate.rules).string = {min_len: 1}];

        // Type of the subject token.
        string subject_token_type = 7 [(validate.rules).string = {min_len: 1}];

        // The path of actor token, a security token that represents the identity
        // of the acting party. The acting party is authorized to use the
        // requested security token and act on behalf of the subject.
        string actor_token_path = 8;

        // Type of the actor token.
        string actor_token_type = 9;
      }

      oneof credential_specifier {
        option (validate.required) = true;

        // Access token credentials.
        // https://grpc.io/grpc/cpp/namespacegrpc.html#ad3a80da696ffdaea943f0f858d7a360d.
        string access_token = 1;

        // Google Compute Engine credentials.
        // https://grpc.io/grpc/cpp/namespacegrpc.html#a6beb3ac70ff94bd2ebbd89b8f21d1f61
        google.protobuf.Empty google_compute_engine = 2;

        // Google refresh token credentials.
        // https://grpc.io/grpc/cpp/namespacegrpc.html#a96901c997b91bc6513b08491e0dca37c.
        string google_refresh_token = 3;

        // Service Account JWT Access credentials.
        // https://grpc.io/grpc/cpp/namespacegrpc.html#a92a9f959d6102461f66ee973d8e9d3aa.
        ServiceAccountJWTAccessCredentials service_account_jwt_access = 4;

        // Google IAM credentials.
        // https://grpc.io/grpc/cpp/namespacegrpc.html#a9fc1fc101b41e680d47028166e76f9d0.
        GoogleIAMCredentials google_iam = 5;

        // Custom authenticator credentials.
        // https://grpc.io/grpc/cpp/namespacegrpc.html#a823c6a4b19ffc71fb33e90154ee2ad07.
        // https://grpc.io/docs/guides/auth.html#extending-grpc-to-support-other-authentication-mechanisms.
        MetadataCredentialsFromPlugin from_plugin = 6;

        // Custom security token service which implements OAuth 2.0 token exchange.
        // https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16
        // See https://github.com/grpc/grpc/pull/19587.
        StsService sts_service = 7;
      }
    }

    // Channel arguments.
    message ChannelArgs {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs";

      message Value {
        option (udpa.annotations.versioning).previous_message_type =
            "envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.Value";

        // Pointer values are not supported, since they don't make any sense when
        // delivered via the API.
        oneof value_specifier {
          option (validate.required) = true;

          string string_value = 1;

          int64 int_value = 2;
        }
      }

      // See grpc_types.h GRPC_ARG #defines for keys that work here.
      // NOTE(review): the map's generic arguments were lost in extraction;
      // restored to <string, Value> to match
      // envoy.config.core.v3.GrpcService.GoogleGrpc.ChannelArgs.
      map<string, Value> args = 1;
    }

    // The target URI when using the `Google C++ gRPC client
    // `_. SSL credentials will be supplied in
    // :ref:`channel_credentials `.
    string target_uri = 1 [(validate.rules).string = {min_len: 1}];

    ChannelCredentials channel_credentials = 2;

    // A set of call credentials that can be composed with `channel credentials
    // `_.
    repeated CallCredentials call_credentials = 3;

    // The human readable prefix to use when emitting statistics for the gRPC
    // service.
    //
    // .. csv-table::
    //    :header: Name, Type, Description
    //    :widths: 1, 1, 2
    //
    //    streams_total, Counter, Total number of streams opened
    //    streams_closed_<gRPC status code>, Counter, Total streams closed with <gRPC status code>
    string stat_prefix = 4 [(validate.rules).string = {min_len: 1}];

    // The name of the Google gRPC credentials factory to use. This must have been registered with
    // Envoy. If this is empty, a default credentials factory will be used that sets up channel
    // credentials based on other configuration parameters.
    string credentials_factory_name = 5;

    // Additional configuration for site-specific customizations of the Google
    // gRPC library.
    google.protobuf.Struct config = 6;

    // How many bytes each stream can buffer internally.
    // If not set an implementation defined default is applied (1MiB).
    google.protobuf.UInt32Value per_stream_buffer_limit_bytes = 7;

    // Custom channels args.
    ChannelArgs channel_args = 8;
  }

  reserved 4;

  oneof target_specifier {
    option (validate.required) = true;

    // Envoy's in-built gRPC client.
    // See the :ref:`gRPC services overview `
    // documentation for discussion on gRPC client selection.
    EnvoyGrpc envoy_grpc = 1;

    // `Google C++ gRPC client `_
    // See the :ref:`gRPC services overview `
    // documentation for discussion on gRPC client selection.
    GoogleGrpc google_grpc = 2;
  }

  // The timeout for the gRPC request. This is the timeout for a specific
  // request.
  google.protobuf.Duration timeout = 3;

  // Additional metadata to include in streams initiated to the GrpcService.
  // This can be used for scenarios in which additional ad hoc authorization
  // headers (e.g. ``x-foo-bar: baz-key``) are to be injected.
  repeated HeaderValue initial_metadata = 5;
}

================================================
FILE: api/envoy/config/core/v4alpha/health_check.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "envoy/config/core/v4alpha/base.proto";
import "envoy/config/core/v4alpha/event_service_config.proto";
import "envoy/type/matcher/v4alpha/string.proto";
import "envoy/type/v3/http.proto";
import "envoy/type/v3/range.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "envoy/annotations/deprecation.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "HealthCheckProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Health check]
// * Health checking :ref:`architecture overview `.
// * If health checking is configured for a cluster, additional statistics are emitted.
They are // documented :ref:`here `. // Endpoint health status. enum HealthStatus { // The health status is not known. This is interpreted by Envoy as *HEALTHY*. UNKNOWN = 0; // Healthy. HEALTHY = 1; // Unhealthy. UNHEALTHY = 2; // Connection draining in progress. E.g., // ``_ // or // ``_. // This is interpreted by Envoy as *UNHEALTHY*. DRAINING = 3; // Health check timed out. This is part of HDS and is interpreted by Envoy as // *UNHEALTHY*. TIMEOUT = 4; // Degraded. DEGRADED = 5; } // [#next-free-field: 24] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck"; // Describes the encoding of the payload bytes in the payload. message Payload { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck.Payload"; oneof payload { option (validate.required) = true; // Hex encoded payload. E.g., "000000FF". string text = 1 [(validate.rules).string = {min_len: 1}]; // [#not-implemented-hide:] Binary payload. bytes binary = 2; } } // [#next-free-field: 12] message HttpHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck.HttpHealthCheck"; reserved 5, 7; reserved "service_name", "use_http2"; // The value of the host header in the HTTP health check request. If // left empty (default value), the name of the cluster this health check is associated // with will be used. The host header can be customized for a specific endpoint by setting the // :ref:`hostname ` field. string host = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Specifies the HTTP path that will be requested during health checking. For example // */healthcheck*. string path = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // [#not-implemented-hide:] HTTP specific payload. Payload send = 3; // [#not-implemented-hide:] HTTP specific response. 
Payload receive = 4; // Specifies a list of HTTP headers that should be added to each request that is sent to the // health checked cluster. For more information, including details on header value syntax, see // the documentation on :ref:`custom request headers // `. repeated HeaderValueOption request_headers_to_add = 6 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request that is sent to the // health checked cluster. repeated string request_headers_to_remove = 8 [(validate.rules).repeated = { items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a list of HTTP response statuses considered healthy. If provided, replaces default // 200-only policy - 200 must be included explicitly as needed. Ranges follow half-open // semantics of :ref:`Int64Range `. The start and end of each // range are required. Only statuses in the range [100, 600) are allowed. repeated type.v3.Int64Range expected_statuses = 9; // Use specified application protocol for health checks. type.v3.CodecClientType codec_client_type = 10 [(validate.rules).enum = {defined_only: true}]; // An optional service name parameter which is used to validate the identity of // the health checked cluster using a :ref:`StringMatcher // `. See the :ref:`architecture overview // ` for more information. type.matcher.v4alpha.StringMatcher service_name_matcher = 11; } message TcpHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck.TcpHealthCheck"; // Empty payloads imply a connect-only health check. Payload send = 1; // When checking the response, “fuzzy” matching is performed such that each // binary block must be found, and in the order specified, but not // necessarily contiguous. 
repeated Payload receive = 2; } message RedisHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck.RedisHealthCheck"; // If set, optionally perform ``EXISTS `` instead of ``PING``. A return value // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance // by setting the specified key to any value and waiting for traffic to drain. string key = 1; } // `grpc.health.v1.Health // `_-based // healthcheck. See `gRPC doc `_ // for details. message GrpcHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck.GrpcHealthCheck"; // An optional service name parameter which will be sent to gRPC service in // `grpc.health.v1.HealthCheckRequest // `_. // message. See `gRPC health-checking overview // `_ for more information. string service_name = 1; // The value of the :authority header in the gRPC health check request. If // left empty (default value), the name of the cluster this health check is associated // with will be used. The authority header can be customized for a specific endpoint by setting // the :ref:`hostname ` field. string authority = 2 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // Custom health check. message CustomHealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck.CustomHealthCheck"; reserved 2; reserved "config"; // The registered name of the custom health checker. string name = 1 [(validate.rules).string = {min_len: 1}]; // A custom health checker specific configuration which depends on the custom health checker // being instantiated. See :api:`envoy/config/health_checker` for reference. oneof config_type { google.protobuf.Any typed_config = 3; } } // Health checks occur over the transport socket specified for the cluster. 
This implies that if a // cluster is using a TLS-enabled transport socket, the health check will also occur over TLS. // // This allows overriding the cluster TLS settings, just for health check connections. message TlsOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HealthCheck.TlsOptions"; // Specifies the ALPN protocols for health check connections. This is useful if the // corresponding upstream is using ALPN-based :ref:`FilterChainMatch // ` along with different protocols for health checks // versus data connections. If empty, no ALPN protocols will be set on health check connections. repeated string alpn_protocols = 1; } reserved 10; // The time to wait for a health check response. If the timeout is reached the // health check attempt will be considered a failure. google.protobuf.Duration timeout = 1 [(validate.rules).duration = { required: true gt {} }]; // The interval between health checks. google.protobuf.Duration interval = 2 [(validate.rules).duration = { required: true gt {} }]; // An optional jitter amount in milliseconds. If specified, Envoy will start health // checking after a random time in ms between 0 and initial_jitter. This only // applies to the first health check. google.protobuf.Duration initial_jitter = 20; // An optional jitter amount in milliseconds. If specified, during every // interval Envoy will add interval_jitter to the wait time. google.protobuf.Duration interval_jitter = 3; // An optional jitter amount as a percentage of interval_ms. If specified, // during every interval Envoy will add interval_ms * // interval_jitter_percent / 100 to the wait time. // // If interval_jitter_ms and interval_jitter_percent are both set, both of // them will be used to increase the wait time. uint32 interval_jitter_percent = 18; // The number of unhealthy health checks required before a host is marked // unhealthy. 
Note that for *http* health checking if a host responds with 503 // this threshold is ignored and the host is considered unhealthy immediately. google.protobuf.UInt32Value unhealthy_threshold = 4 [(validate.rules).message = {required: true}]; // The number of healthy health checks required before a host is marked // healthy. Note that during startup, only a single successful health check is // required to mark a host healthy. google.protobuf.UInt32Value healthy_threshold = 5 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Non-serving port for health checking. google.protobuf.UInt32Value alt_port = 6; // Reuse health check connection between health checks. Default is true. google.protobuf.BoolValue reuse_connection = 7; oneof health_checker { option (validate.required) = true; // HTTP health check. HttpHealthCheck http_health_check = 8; // TCP health check. TcpHealthCheck tcp_health_check = 9; // gRPC health check. GrpcHealthCheck grpc_health_check = 11; // Custom health check. CustomHealthCheck custom_health_check = 13; } // The "no traffic interval" is a special health check interval that is used when a cluster has // never had traffic routed to it. This lower interval allows cluster information to be kept up to // date, without sending a potentially large amount of active health checking traffic for no // reason. Once a cluster has been used for traffic routing, Envoy will shift back to using the // standard health check interval that is defined. Note that this interval takes precedence over // any other. // // The default value for "no traffic interval" is 60 seconds. google.protobuf.Duration no_traffic_interval = 12 [(validate.rules).duration = {gt {}}]; // The "unhealthy interval" is a health check interval that is used for hosts that are marked as // unhealthy. As soon as the host is marked as healthy, Envoy will shift back to using the // standard health check interval that is defined. 
// // The default value for "unhealthy interval" is the same as "interval". google.protobuf.Duration unhealthy_interval = 14 [(validate.rules).duration = {gt {}}]; // The "unhealthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as unhealthy. For subsequent health checks // Envoy will shift back to using either "unhealthy interval" if present or the standard health // check interval that is defined. // // The default value for "unhealthy edge interval" is the same as "unhealthy interval". google.protobuf.Duration unhealthy_edge_interval = 15 [(validate.rules).duration = {gt {}}]; // The "healthy edge interval" is a special health check interval that is used for the first // health check right after a host is marked as healthy. For subsequent health checks // Envoy will shift back to using the standard health check interval that is defined. // // The default value for "healthy edge interval" is the same as the default interval. google.protobuf.Duration healthy_edge_interval = 16 [(validate.rules).duration = {gt {}}]; // Specifies the path to the :ref:`health check event log `. // If empty, no event log will be written. string event_log_path = 17; // [#not-implemented-hide:] // The gRPC service for the health check event service. // If empty, health check events won't be sent to a remote endpoint. EventServiceConfig event_service = 22; // If set to true, health check failure events will always be logged. If set to false, only the // initial health check failure event will be logged. // The default value is false. bool always_log_health_check_failures = 19; // This allows overriding the cluster TLS settings, just for health check connections. TlsOptions tls_options = 21; // Optional key/value pairs that will be used to match a transport socket from those specified in the cluster's // :ref:`tranport socket matches `. // For example, the following match criteria // // .. 
code-block:: yaml // // transport_socket_match_criteria: // useMTLS: true // // Will match the following :ref:`cluster socket match ` // // .. code-block:: yaml // // transport_socket_matches: // - name: "useMTLS" // match: // useMTLS: true // transport_socket: // name: envoy.transport_sockets.tls // config: { ... } # tls socket configuration // // If this field is set, then for health checks it will supersede an entry of *envoy.transport_socket* in the // :ref:`LbEndpoint.Metadata `. // This allows using different transport socket capabilities for health checking versus proxying to the // endpoint. // // If the key/values pairs specified do not match any // :ref:`transport socket matches `, // the cluster's :ref:`transport socket ` // will be used for health check socket configuration. google.protobuf.Struct transport_socket_match_criteria = 23; } ================================================ FILE: api/envoy/config/core/v4alpha/http_uri.proto ================================================ syntax = "proto3"; package envoy.config.core.v4alpha; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; option java_outer_classname = "HttpUriProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: HTTP Service URI ] // Envoy external URI descriptor message HttpUri { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HttpUri"; // The HTTP server URI. It should be a full FQDN with protocol, host and path. // // Example: // // .. code-block:: yaml // // uri: https://www.googleapis.com/oauth2/v1/certs // string uri = 1 [(validate.rules).string = {min_len: 1}]; // Specify how `uri` is to be fetched. 
Today, this requires an explicit // cluster, but in the future we may support dynamic cluster creation or // inline DNS resolution. See `issue // `_. oneof http_upstream_type { option (validate.required) = true; // A cluster is created in the Envoy "cluster_manager" config // section. This field specifies the cluster name. // // Example: // // .. code-block:: yaml // // cluster: jwks_cluster // string cluster = 2 [(validate.rules).string = {min_len: 1}]; } // Sets the maximum duration in milliseconds that a response can take to arrive upon request. google.protobuf.Duration timeout = 3 [(validate.rules).duration = { required: true gte {} }]; } ================================================ FILE: api/envoy/config/core/v4alpha/protocol.proto ================================================ syntax = "proto3"; package envoy.config.core.v4alpha; import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.core.v4alpha"; option java_outer_classname = "ProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Protocol options] // [#not-implemented-hide:] message TcpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.TcpProtocolOptions"; } message UpstreamHttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.UpstreamHttpProtocolOptions"; // Set transport socket `SNI `_ for new // upstream connections based on the downstream HTTP host/authority header, as seen by the // :ref:`router filter `. 
bool auto_sni = 1; // Automatically validate the upstream presented certificate for new upstream connections based on the // downstream HTTP host/authority header, as seen by the // :ref:`router filter `. // This field is intended to be set with the `auto_sni` field. bool auto_san_validation = 2; } // [#next-free-field: 6] message HttpProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.HttpProtocolOptions"; // Action to take when Envoy receives client request with header names containing underscore // characters. // Underscore character is allowed in header names by the RFC-7230 and this behavior is implemented // as a security measure due to systems that treat '_' and '-' as interchangeable. Envoy by default allows client request headers with underscore // characters. enum HeadersWithUnderscoresAction { // Allow headers with underscores. This is the default behavior. ALLOW = 0; // Reject client request. HTTP/1 requests are rejected with the 400 status. HTTP/2 requests // end with the stream reset. The "httpN.requests_rejected_with_underscores_in_headers" counter // is incremented for each rejected request. REJECT_REQUEST = 1; // Drop the header with name containing underscores. The header is dropped before the filter chain is // invoked and as such filters will not see dropped headers. The // "httpN.dropped_headers_with_underscores" is incremented for each dropped header. DROP_HEADER = 2; } // The idle timeout for connections. The idle timeout is defined as the // period in which there are no active requests. When the // idle timeout is reached the connection will be closed. If the connection is an HTTP/2 // downstream connection a drain sequence will occur prior to closing the connection, see // :ref:`drain_timeout // `. // Note that request based timeouts mean that HTTP/2 PINGs will not keep the connection alive. // If not specified, this defaults to 1 hour. To disable idle timeouts explicitly set this to 0. // // .. 
warning:: // Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. google.protobuf.Duration idle_timeout = 1; // The maximum duration of a connection. The duration is defined as a period since a connection // was established. If not set, there is no max duration. When max_connection_duration is reached // the connection will be closed. Drain sequence will occur prior to closing the connection if // it's applicable. See :ref:`drain_timeout // `. // Note: not implemented for upstream connections. google.protobuf.Duration max_connection_duration = 3; // The maximum number of headers. If unconfigured, the default // maximum number of request headers allowed is 100. Requests that exceed this limit will receive // a 431 response for HTTP/1.x and cause a stream reset for HTTP/2. google.protobuf.UInt32Value max_headers_count = 2 [(validate.rules).uint32 = {gte: 1}]; // Total duration to keep alive an HTTP request/response stream. If the time limit is reached the stream will be // reset independent of any other timeouts. If not specified, this value is not set. google.protobuf.Duration max_stream_duration = 4; // Action to take when a client request with a header name containing underscore characters is received. // If this setting is not specified, the value defaults to ALLOW. // Note: upstream responses are not affected by this setting. 
HeadersWithUnderscoresAction headers_with_underscores_action = 5; } // [#next-free-field: 8] message Http1ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http1ProtocolOptions"; message HeaderKeyFormat { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat"; message ProperCaseWords { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http1ProtocolOptions.HeaderKeyFormat.ProperCaseWords"; } oneof header_format { option (validate.required) = true; // Formats the header by proper casing words: the first character and any character following // a special character will be capitalized if it's an alpha character. For example, // "content-type" becomes "Content-Type", and "foo$b#$are" becomes "Foo$B#$Are". // Note that while this results in most headers following conventional casing, certain headers // are not covered. For example, the "TE" header will be formatted as "Te". ProperCaseWords proper_case_words = 1; } } // Handle HTTP requests with absolute URLs in the requests. These requests // are generally sent by clients to forward/explicit proxies. This allows clients to configure // envoy as their HTTP proxy. In Unix, for example, this is typically done by setting the // *http_proxy* environment variable. google.protobuf.BoolValue allow_absolute_url = 1; // Handle incoming HTTP/1.0 and HTTP 0.9 requests. // This is off by default, and not fully standards compliant. There is support for pre-HTTP/1.1 // style connect logic, dechunking, and handling lack of client host iff // *default_host_for_http_10* is configured. bool accept_http_10 = 2; // A default host for HTTP/1.0 requests. This is highly suggested if *accept_http_10* is true as // Envoy does not otherwise support HTTP/1.0 without a Host header. // This is a no-op if *accept_http_10* is not true. 
string default_host_for_http_10 = 3; // Describes how the keys for response headers should be formatted. By default, all header keys // are lower cased. HeaderKeyFormat header_key_format = 4; // Enables trailers for HTTP/1. By default the HTTP/1 codec drops proxied trailers. // // .. attention:: // // Note that this only happens when Envoy is chunk encoding which occurs when: // - The request is HTTP/1.1. // - Is neither a HEAD only request nor a HTTP Upgrade. // - Not a response to a HEAD request. // - The content length header is not present. bool enable_trailers = 5; // Allows Envoy to process requests/responses with both `Content-Length` and `Transfer-Encoding` // headers set. By default such messages are rejected, but if option is enabled - Envoy will // remove Content-Length header and process message. // See `RFC7230, sec. 3.3.3 ` for details. // // .. attention:: // Enabling this option might lead to request smuggling vulnerability, especially if traffic // is proxied via multiple layers of proxies. bool allow_chunked_length = 6; // Allows invalid HTTP messaging. When this option is false, then Envoy will terminate // HTTP/1.1 connections upon receiving an invalid HTTP message. However, // when this option is true, then Envoy will leave the HTTP/1.1 connection // open where possible. // If set, this overrides any HCM :ref:`stream_error_on_invalid_http_messaging // `. google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 7; } message KeepaliveSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.KeepaliveSettings"; // Send HTTP/2 PING frames at this period, in order to test that the connection is still alive. google.protobuf.Duration interval = 1 [(validate.rules).duration = { required: true gte {nanos: 1000000} }]; // How long to wait for a response to a keepalive PING. If a response is not received within this // time period, the connection will be aborted. 
google.protobuf.Duration timeout = 2 [(validate.rules).duration = { required: true gte {nanos: 1000000} }]; // A random jitter amount as a percentage of interval that will be added to each interval. // A value of zero means there will be no jitter. // The default value is 15%. type.v3.Percent interval_jitter = 3; } // [#next-free-field: 16] message Http2ProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http2ProtocolOptions"; // Defines a parameter to be sent in the SETTINGS frame. // See `RFC7540, sec. 6.5.1 `_ for details. message SettingsParameter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.core.v3.Http2ProtocolOptions.SettingsParameter"; // The 16 bit parameter identifier. google.protobuf.UInt32Value identifier = 1 [ (validate.rules).uint32 = {lte: 65535 gte: 0}, (validate.rules).message = {required: true} ]; // The 32 bit parameter value. google.protobuf.UInt32Value value = 2 [(validate.rules).message = {required: true}]; } reserved 12; reserved "stream_error_on_invalid_http_messaging"; // `Maximum table size `_ // (in octets) that the encoder is permitted to use for the dynamic HPACK table. Valid values // range from 0 to 4294967295 (2^32 - 1) and defaults to 4096. 0 effectively disables header // compression. google.protobuf.UInt32Value hpack_table_size = 1; // `Maximum concurrent streams `_ // allowed for peer on one HTTP/2 connection. Valid values range from 1 to 2147483647 (2^31 - 1) // and defaults to 2147483647. // // For upstream connections, this also limits how many streams Envoy will initiate concurrently // on a single connection. If the limit is reached, Envoy may queue requests or establish // additional connections (as allowed per circuit breaker limits). google.protobuf.UInt32Value max_concurrent_streams = 2 [(validate.rules).uint32 = {lte: 2147483647 gte: 1}]; // `Initial stream-level flow-control window // `_ size. 
Valid values range from 65535 // (2^16 - 1, HTTP/2 default) to 2147483647 (2^31 - 1, HTTP/2 maximum) and defaults to 268435456 // (256 * 1024 * 1024). // // NOTE: 65535 is the initial window size from HTTP/2 spec. We only support increasing the default // window size now, so it's also the minimum. // // This field also acts as a soft limit on the number of bytes Envoy will buffer per-stream in the // HTTP/2 codec buffers. Once the buffer reaches this pointer, watermark callbacks will fire to // stop the flow of data to the codec buffers. google.protobuf.UInt32Value initial_stream_window_size = 3 [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Similar to *initial_stream_window_size*, but for connection-level flow-control // window. Currently, this has the same minimum/maximum/default as *initial_stream_window_size*. google.protobuf.UInt32Value initial_connection_window_size = 4 [(validate.rules).uint32 = {lte: 2147483647 gte: 65535}]; // Allows proxying Websocket and other upgrades over H2 connect. bool allow_connect = 5; // [#not-implemented-hide:] Hiding until envoy has full metadata support. // Still under implementation. DO NOT USE. // // Allows metadata. See [metadata // docs](https://github.com/envoyproxy/envoy/blob/master/source/docs/h2_metadata.md) for more // information. bool allow_metadata = 6; // Limit the number of pending outbound downstream frames of all types (frames that are waiting to // be written into the socket). Exceeding this limit triggers flood mitigation and connection is // terminated. The ``http2.outbound_flood`` stat tracks the number of terminated connections due // to flood mitigation. The default limit is 10000. // [#comment:TODO: implement same limits for upstream outbound frames as well.] 
google.protobuf.UInt32Value max_outbound_frames = 7 [(validate.rules).uint32 = {gte: 1}]; // Limit the number of pending outbound downstream frames of types PING, SETTINGS and RST_STREAM, // preventing high memory utilization when receiving continuous stream of these frames. Exceeding // this limit triggers flood mitigation and connection is terminated. The // ``http2.outbound_control_flood`` stat tracks the number of terminated connections due to flood // mitigation. The default limit is 1000. // [#comment:TODO: implement same limits for upstream outbound frames as well.] google.protobuf.UInt32Value max_outbound_control_frames = 8 [(validate.rules).uint32 = {gte: 1}]; // Limit the number of consecutive inbound frames of types HEADERS, CONTINUATION and DATA with an // empty payload and no end stream flag. Those frames have no legitimate use and are abusive, but // might be a result of a broken HTTP/2 implementation. The ``http2.inbound_empty_frames_flood`` // stat tracks the number of connections terminated due to flood mitigation. // Setting this to 0 will terminate connection upon receiving first frame with an empty payload // and no end stream flag. The default limit is 1. // [#comment:TODO: implement same limits for upstream inbound frames as well.] google.protobuf.UInt32Value max_consecutive_inbound_frames_with_empty_payload = 9; // Limit the number of inbound PRIORITY frames allowed per each opened stream. If the number // of PRIORITY frames received over the lifetime of connection exceeds the value calculated // using this formula:: // // max_inbound_priority_frames_per_stream * (1 + inbound_streams) // // the connection is terminated. The ``http2.inbound_priority_frames_flood`` stat tracks // the number of connections terminated due to flood mitigation. The default limit is 100. // [#comment:TODO: implement same limits for upstream inbound frames as well.] 
google.protobuf.UInt32Value max_inbound_priority_frames_per_stream = 10; // Limit the number of inbound WINDOW_UPDATE frames allowed per DATA frame sent. If the number // of WINDOW_UPDATE frames received over the lifetime of connection exceeds the value calculated // using this formula:: // // 1 + 2 * (inbound_streams + // max_inbound_window_update_frames_per_data_frame_sent * outbound_data_frames) // // the connection is terminated. The ``http2.inbound_window_update_frames_flood`` stat tracks // the number of connections terminated due to flood mitigation. The default limit is 10. // Setting this to 1 should be enough to support HTTP/2 implementations with basic flow control, // but more complex implementations that try to estimate available bandwidth require at least 2. // [#comment:TODO: implement same limits for upstream inbound frames as well.] google.protobuf.UInt32Value max_inbound_window_update_frames_per_data_frame_sent = 11 [(validate.rules).uint32 = {gte: 1}]; // Allows invalid HTTP messaging and headers. When this option is disabled (default), then // the whole HTTP/2 connection is terminated upon receiving invalid HEADERS frame. However, // when this option is enabled, only the offending stream is terminated. // // This overrides any HCM :ref:`stream_error_on_invalid_http_messaging // ` // // See `RFC7540, sec. 8.1 `_ for details. google.protobuf.BoolValue override_stream_error_on_invalid_http_message = 14; // [#not-implemented-hide:] // Specifies SETTINGS frame parameters to be sent to the peer, with two exceptions: // // 1. SETTINGS_ENABLE_PUSH (0x2) is not configurable as HTTP/2 server push is not supported by // Envoy. // // 2. SETTINGS_ENABLE_CONNECT_PROTOCOL (0x8) is only configurable through the named field // 'allow_connect'. // // Note that custom parameters specified through this field can not also be set in the // corresponding named parameters: // // .. 
// code-block:: text
//
//   ID    Field Name
//   ----------------
//   0x1   hpack_table_size
//   0x3   max_concurrent_streams
//   0x4   initial_stream_window_size
//
// Collisions will trigger config validation failure on load/update. Likewise, inconsistencies
// between custom parameters with the same identifier will trigger a failure.
//
// See `IANA HTTP/2 Settings
// `_ for
// standardized identifiers.
repeated SettingsParameter custom_settings_parameters = 13;

// Send HTTP/2 PING frames to verify that the connection is still healthy. If the remote peer
// does not respond within the configured timeout, the connection will be aborted.
KeepaliveSettings connection_keepalive = 15;
}

// [#not-implemented-hide:]
// Wrapper around HTTP/2 options: gRPC is carried over HTTP/2, so its transport
// tuning is expressed entirely via Http2ProtocolOptions.
message GrpcProtocolOptions {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.GrpcProtocolOptions";

  Http2ProtocolOptions http2_protocol_options = 1;
}

================================================
FILE: api/envoy/config/core/v4alpha/proxy_protocol.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "ProxyProtocolProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Proxy Protocol]

message ProxyProtocolConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.ProxyProtocolConfig";

  enum Version {
    // PROXY protocol version 1. Human readable format.
    V1 = 0;

    // PROXY protocol version 2. Binary format.
    V2 = 1;
  }

  // The PROXY protocol version to use.
  // See https://www.haproxy.org/download/2.1/doc/proxy-protocol.txt for details
  Version version = 1;
}

================================================
FILE: api/envoy/config/core/v4alpha/socket_option.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "SocketOptionProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Socket Option ]

// Generic socket option message. This would be used to set socket options that
// might not exist in upstream kernels or precompiled Envoy binaries.
// [#next-free-field: 7]
message SocketOption {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.SocketOption";

  // The phase of socket setup at which the option is applied; maps onto the
  // lifecycle of a listening/connecting socket.
  enum SocketState {
    // Socket options are applied after socket creation but before binding the socket to a port
    STATE_PREBIND = 0;

    // Socket options are applied after binding the socket to a port but before calling listen()
    STATE_BOUND = 1;

    // Socket options are applied after calling listen()
    STATE_LISTENING = 2;
  }

  // An optional name to give this socket option for debugging, etc.
  // Uniqueness is not required and no special meaning is assumed.
  string description = 1;

  // Corresponding to the level value passed to setsockopt, such as IPPROTO_TCP
  int64 level = 2;

  // The numeric name as passed to setsockopt
  int64 name = 3;

  oneof value {
    option (validate.required) = true;

    // Because many sockopts take an int value.
    int64 int_value = 4;

    // Otherwise it's a byte buffer.
    bytes buf_value = 5;
  }

  // The state in which the option will be applied. When used in BindConfig
  // STATE_PREBIND is currently the only valid value.
  SocketState state = 6 [(validate.rules).enum = {defined_only: true}];
}

================================================
FILE: api/envoy/config/core/v4alpha/substitution_format_string.proto
================================================
syntax = "proto3";

package envoy.config.core.v4alpha;

import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.core.v4alpha";
option java_outer_classname = "SubstitutionFormatStringProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Substitution format string]

// Configuration to use multiple :ref:`command operators `
// to generate a new string in either plain text or JSON format.
message SubstitutionFormatString {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.core.v3.SubstitutionFormatString";

  oneof format {
    option (validate.required) = true;

    // Specify a format with command operators to form a text string.
    // Its details are described in :ref:`format string`.
    //
    // For example, setting ``text_format`` like below,
    //
    // .. validated-code-block:: yaml
    //   :type-name: envoy.config.core.v3.SubstitutionFormatString
    //
    //   text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
    //
    // generates plain text similar to:
    //
    // .. code-block:: text
    //
    //   upstream connect error:503:path=/foo
    //
    string text_format = 1 [(validate.rules).string = {min_len: 1}];

    // Specify a format with command operators to form a JSON string.
    // Its details are described in :ref:`format dictionary`.
    // Values are rendered as strings, numbers, or boolean values as appropriate.
    // Nested JSON objects may be produced by some command operators (e.g. FILTER_STATE or DYNAMIC_METADATA).
    // See the documentation for a specific command operator for details.
    //
    // ..
// validated-code-block:: yaml
//   :type-name: envoy.config.core.v3.SubstitutionFormatString
//
//   json_format:
//     status: "%RESPONSE_CODE%"
//     message: "%LOCAL_REPLY_BODY%"
//
// The following JSON object would be created:
//
// .. code-block:: json
//
//   {
//     "status": 500,
//     "message": "My error message"
//   }
//
google.protobuf.Struct json_format = 2 [(validate.rules).message = {required: true}];
}

// If set to true, when command operators are evaluated to null,
//
// * for ``text_format``, the output of the empty operator is changed from ``-`` to an
//   empty string, so that empty values are omitted entirely.
// * for ``json_format`` the keys with null values are omitted in the output structure.
bool omit_empty_values = 3;

// Specify a *content_type* field.
// If this field is not set then ``text/plain`` is used for *text_format* and
// ``application/json`` is used for *json_format*.
//
// .. validated-code-block:: yaml
//   :type-name: envoy.config.core.v3.SubstitutionFormatString
//
//   content_type: "text/html; charset=UTF-8"
//
string content_type = 4;
}

================================================
FILE: api/envoy/config/endpoint/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2:pkg",
        "//envoy/api/v2/endpoint:pkg",
        "//envoy/config/core/v3:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/config/endpoint/v3/endpoint.proto
================================================
syntax = "proto3";

package envoy.config.endpoint.v3;

import "envoy/config/endpoint/v3/endpoint_components.proto";
import "envoy/type/v3/percent.proto";

import "google/api/annotations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.endpoint.v3";
option java_outer_classname = "EndpointProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Endpoint configuration]
// Endpoint discovery :ref:`architecture overview `

// Each route from RDS will map to a single cluster or traffic split across
// clusters using weights expressed in the RDS WeightedCluster.
//
// With EDS, each cluster is treated independently from a LB perspective, with
// LB taking place between the Localities within a cluster and at a finer
// granularity between the hosts within a locality. The percentage of traffic
// for each endpoint is determined by both its load_balancing_weight, and the
// load_balancing_weight of its locality. First, a locality will be selected,
// then an endpoint within that locality will be chosen based on its weight.
// [#next-free-field: 6]
message ClusterLoadAssignment {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.ClusterLoadAssignment";

  // Load balancing policy settings.
  // [#next-free-field: 6]
  message Policy {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.ClusterLoadAssignment.Policy";

    // [#not-implemented-hide:]
    message DropOverload {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.api.v2.ClusterLoadAssignment.Policy.DropOverload";

      // Identifier for the policy specifying the drop.
      string category = 1 [(validate.rules).string = {min_len: 1}];

      // Percentage of traffic that should be dropped for the category.
      type.v3.FractionalPercent drop_percentage = 2;
    }

    reserved 1, 5;

    reserved "disable_overprovisioning";

    // Action to trim the overall incoming traffic to protect the upstream
    // hosts. This action allows protection in case the hosts are unable to
    // recover from an outage, or unable to autoscale or unable to handle
    // incoming traffic volume for any reason.
    //
    // At the client each category is applied one after the other to generate
    // the 'actual' drop percentage on all outgoing traffic. For example:
    //
    // .. code-block:: json
    //
    //  { "drop_overloads": [
    //      { "category": "throttle", "drop_percentage": 60 }
    //      { "category": "lb", "drop_percentage": 50 }
    //  ]}
    //
    // The actual drop percentages applied to the traffic at the clients will be
    //    "throttle"_drop = 60%
    //    "lb"_drop = 20%  // 50% of the remaining 'actual' load, which is 40%.
    //    actual_outgoing_load = 20% // remaining after applying all categories.
    // [#not-implemented-hide:]
    repeated DropOverload drop_overloads = 2;

    // Priority levels and localities are considered overprovisioned with this
    // factor (in percentage). This means that we don't consider a priority
    // level or locality unhealthy until the fraction of healthy hosts
    // multiplied by the overprovisioning factor drops below 100.
    // With the default value 140(1.4), Envoy doesn't consider a priority level
    // or a locality unhealthy until their percentage of healthy hosts drops
    // below 72%. For example:
    //
    // .. code-block:: json
    //
    //   { "overprovisioning_factor": 100 }
    //
    // Read more at :ref:`priority levels ` and
    // :ref:`localities `.
    google.protobuf.UInt32Value overprovisioning_factor = 3 [(validate.rules).uint32 = {gt: 0}];

    // The max time until which the endpoints from this assignment can be used.
    // If no new assignments are received before this time expires the endpoints
    // are considered stale and should be marked unhealthy.
    // Defaults to 0 which means endpoints never go stale.
    google.protobuf.Duration endpoint_stale_after = 4 [(validate.rules).duration = {gt {}}];
  }

  // Name of the cluster. This will be the :ref:`service_name
  // ` value if specified
  // in the cluster :ref:`EdsClusterConfig
  // `.
  string cluster_name = 1 [(validate.rules).string = {min_len: 1}];

  // List of endpoints to load balance to.
  repeated LocalityLbEndpoints endpoints = 2;

  // Map of named endpoints that can be referenced in LocalityLbEndpoints.
  // NOTE(review): the map's type parameters appear to have been stripped by
  // extraction (likely ``map<string, Endpoint>``) — confirm against upstream
  // before regenerating.
  // [#not-implemented-hide:]
  map named_endpoints = 5;

  // Load balancing policy settings.
  Policy policy = 4;
}

================================================
FILE: api/envoy/config/endpoint/v3/endpoint_components.proto
================================================
syntax = "proto3";

package envoy.config.endpoint.v3;

import "envoy/config/core/v3/address.proto";
import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/health_check.proto";

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.endpoint.v3";
option java_outer_classname = "EndpointComponentsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Endpoints]

// Upstream host identifier.
message Endpoint {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.Endpoint";

  // The optional health check configuration.
  message HealthCheckConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.endpoint.Endpoint.HealthCheckConfig";

    // Optional alternative health check port value.
    //
    // By default the health check address port of an upstream host is the same
    // as the host's serving address port. This provides an alternative health
    // check port. Setting this with a non-zero value allows an upstream host
    // to have a different health check address port.
    uint32 port_value = 1 [(validate.rules).uint32 = {lte: 65535}];

    // By default, the host header for L7 health checks is controlled by cluster level configuration
    // (see: :ref:`host ` and
    // :ref:`authority `). Setting this
    // to a non-empty value allows overriding the cluster level configuration for a specific
    // endpoint.
    string hostname = 2;
  }

  // The upstream host address.
  //
  // .. attention::
  //
  //   The form of host address depends on the given cluster type. For STATIC or EDS,
  //   it is expected to be a direct IP address (or something resolvable by the
  //   specified :ref:`resolver `
  //   in the Address). For LOGICAL or STRICT DNS, it is expected to be hostname,
  //   and will be resolved via DNS.
  core.v3.Address address = 1;

  // The optional health check configuration is used as configuration for the
  // health checker to contact the health checked host.
  //
  // .. attention::
  //
  //   This takes into effect only for upstream clusters with
  //   :ref:`active health checking ` enabled.
  HealthCheckConfig health_check_config = 2;

  // The hostname associated with this endpoint. This hostname is not used for routing or address
  // resolution. If provided, it will be associated with the endpoint, and can be used for features
  // that require a hostname, like
  // :ref:`auto_host_rewrite `.
  string hostname = 3;
}

// An Endpoint that Envoy can route traffic to.
// [#next-free-field: 6]
message LbEndpoint {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.LbEndpoint";

  // Upstream host identifier or a named reference.
  oneof host_identifier {
    Endpoint endpoint = 1;

    // [#not-implemented-hide:]
    string endpoint_name = 5;
  }

  // Optional health status when known and supplied by EDS server.
  core.v3.HealthStatus health_status = 2;

  // The endpoint metadata specifies values that may be used by the load
  // balancer to select endpoints in a cluster for a given request. The filter
  // name should be specified as *envoy.lb*. An example boolean key-value pair
  // is *canary*, providing the optional canary status of the upstream host.
  // This may be matched against in a route's
  // :ref:`RouteAction ` metadata_match field
  // to subset the endpoints considered in cluster load balancing.
  core.v3.Metadata metadata = 3;

  // The optional load balancing weight of the upstream host; at least 1.
  // Envoy uses the load balancing weight in some of the built in load
  // balancers. The load balancing weight for an endpoint is divided by the sum
  // of the weights of all endpoints in the endpoint's locality to produce a
  // percentage of traffic for the endpoint. This percentage is then further
  // weighted by the endpoint's locality's load balancing weight from
  // LocalityLbEndpoints. If unspecified, each host is presumed to have equal
  // weight in a locality. The sum of the weights of all endpoints in the
  // endpoint's locality must not exceed uint32_t maximal value (4294967295).
  google.protobuf.UInt32Value load_balancing_weight = 4 [(validate.rules).uint32 = {gte: 1}];
}

// A group of endpoints belonging to a Locality.
// One can have multiple LocalityLbEndpoints for a locality, but this is
// generally only done if the different groups need to have different load
// balancing weights or different priorities.
// [#next-free-field: 7]
message LocalityLbEndpoints {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.LocalityLbEndpoints";

  // Identifies location of where the upstream hosts run.
  core.v3.Locality locality = 1;

  // The group of endpoints belonging to the locality specified.
  repeated LbEndpoint lb_endpoints = 2;

  // Optional: Per priority/region/zone/sub_zone weight; at least 1. The load
  // balancing weight for a locality is divided by the sum of the weights of all
  // localities at the same priority level to produce the effective percentage
  // of traffic for the locality. The sum of the weights of all localities at
  // the same priority level must not exceed uint32_t maximal value (4294967295).
  //
  // Locality weights are only considered when :ref:`locality weighted load
  // balancing ` is
  // configured. These weights are ignored otherwise. If no weights are
  // specified when locality weighted load balancing is enabled, the locality is
  // assigned no load.
  google.protobuf.UInt32Value load_balancing_weight = 3 [(validate.rules).uint32 = {gte: 1}];

  // Optional: the priority for this LocalityLbEndpoints. If unspecified this will
  // default to the highest priority (0).
  //
  // Under usual circumstances, Envoy will only select endpoints for the highest
  // priority (0). In the event all endpoints for a particular priority are
  // unavailable/unhealthy, Envoy will fail over to selecting endpoints for the
  // next highest priority group.
  //
  // Priorities should range from 0 (highest) to N (lowest) without skipping.
  uint32 priority = 5 [(validate.rules).uint32 = {lte: 128}];

  // Optional: Per locality proximity value which indicates how close this
  // locality is from the source locality. This value only provides ordering
  // information (lower the value, closer it is to the source locality).
  // This will be consumed by load balancing schemes that need proximity order
  // to determine where to route the requests.
  // [#not-implemented-hide:]
  google.protobuf.UInt32Value proximity = 6;
}

================================================
FILE: api/envoy/config/endpoint/v3/load_report.proto
================================================
syntax = "proto3";

package envoy.config.endpoint.v3;

import "envoy/config/core/v3/address.proto";
import "envoy/config/core/v3/base.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.endpoint.v3";
option java_outer_classname = "LoadReportProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Load Report]

// These are stats Envoy reports to the management server at a frequency defined by
// :ref:`LoadStatsResponse.load_reporting_interval`.
// Stats per upstream region/zone and optionally per subzone.
// [#next-free-field: 9]
message UpstreamLocalityStats {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.UpstreamLocalityStats";

  // Name of zone, region and optionally endpoint group these metrics were
  // collected from. Zone and region names could be empty if unknown.
  core.v3.Locality locality = 1;

  // The total number of requests successfully completed by the endpoints in the
  // locality.
  uint64 total_successful_requests = 2;

  // The total number of unfinished requests
  uint64 total_requests_in_progress = 3;

  // The total number of requests that failed due to errors at the endpoint,
  // aggregated over all endpoints in the locality.
  uint64 total_error_requests = 4;

  // The total number of requests that were issued by this Envoy since
  // the last report. This information is aggregated over all the
  // upstream endpoints in the locality.
  uint64 total_issued_requests = 8;

  // Stats for multi-dimensional load balancing.
  repeated EndpointLoadMetricStats load_metric_stats = 5;

  // Endpoint granularity stats information for this locality. This information
  // is populated if the Server requests it by setting
  // :ref:`LoadStatsResponse.report_endpoint_granularity`.
  repeated UpstreamEndpointStats upstream_endpoint_stats = 7;

  // [#not-implemented-hide:] The priority of the endpoint group these metrics
  // were collected from.
  uint32 priority = 6;
}

// [#next-free-field: 8]
message UpstreamEndpointStats {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.UpstreamEndpointStats";

  // Upstream host address.
  core.v3.Address address = 1;

  // Opaque and implementation dependent metadata of the
  // endpoint. Envoy will pass this directly to the management server.
  google.protobuf.Struct metadata = 6;

  // The total number of requests successfully completed by the endpoints in the
  // locality. These include non-5xx responses for HTTP, where errors
  // originate at the client and the endpoint responded successfully. For gRPC,
  // the grpc-status values are those not covered by total_error_requests below.
  uint64 total_successful_requests = 2;

  // The total number of unfinished requests for this endpoint.
  uint64 total_requests_in_progress = 3;

  // The total number of requests that failed due to errors at the endpoint.
  // For HTTP these are responses with 5xx status codes and for gRPC the
  // grpc-status values:
  //
  //   - DeadlineExceeded
  //   - Unimplemented
  //   - Internal
  //   - Unavailable
  //   - Unknown
  //   - DataLoss
  uint64 total_error_requests = 4;

  // The total number of requests that were issued to this endpoint
  // since the last report. A single TCP connection, HTTP or gRPC
  // request or stream is counted as one request.
  uint64 total_issued_requests = 7;

  // Stats for multi-dimensional load balancing.
  repeated EndpointLoadMetricStats load_metric_stats = 5;
}

message EndpointLoadMetricStats {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.EndpointLoadMetricStats";

  // Name of the metric; may be empty.
  string metric_name = 1;

  // Number of calls that finished and included this metric.
  uint64 num_requests_finished_with_metric = 2;

  // Sum of metric values across all calls that finished with this metric for
  // load_reporting_interval.
  double total_metric_value = 3;
}

// Per cluster load stats. Envoy reports these stats to a management server in a
// :ref:`LoadStatsRequest`
// Next ID: 7
// [#next-free-field: 7]
message ClusterStats {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.endpoint.ClusterStats";

  message DroppedRequests {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.endpoint.ClusterStats.DroppedRequests";

    // Identifier for the policy specifying the drop.
    string category = 1 [(validate.rules).string = {min_len: 1}];

    // Total number of deliberately dropped requests for the category.
    uint64 dropped_count = 2;
  }

  // The name of the cluster.
  string cluster_name = 1 [(validate.rules).string = {min_len: 1}];

  // The eds_cluster_config service_name of the cluster.
  // It's possible that two clusters send the same service_name to EDS,
  // in that case, the management server is supposed to do aggregation on the load reports.
  string cluster_service_name = 6;

  // Need at least one.
  repeated UpstreamLocalityStats upstream_locality_stats = 2
      [(validate.rules).repeated = {min_items: 1}];

  // Cluster-level stats such as total_successful_requests may be computed by
  // summing upstream_locality_stats. In addition, below there are additional
  // cluster-wide stats.
  //
  // The total number of dropped requests. This covers requests
  // deliberately dropped by the drop_overload policy and circuit breaking.
  uint64 total_dropped_requests = 3;

  // Information about deliberately dropped requests for each category specified
  // in the DropOverload policy.
  repeated DroppedRequests dropped_requests = 5;

  // Period over which the actual load report occurred. This will be guaranteed to include every
  // request reported. Due to system load and delays between the *LoadStatsRequest* sent from Envoy
  // and the *LoadStatsResponse* message sent from the management server, this may be longer than
  // the requested load reporting interval in the *LoadStatsResponse*.
  google.protobuf.Duration load_report_interval = 4;
}

================================================
FILE: api/envoy/config/filter/README.md
================================================
Protocol buffer definitions for filters. Visibility of the definitions should be constrained to none except for shared definitions between explicitly enumerated filters (e.g. accesslog and fault definitions).

================================================
FILE: api/envoy/config/filter/accesslog/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2/core:pkg",
        "//envoy/api/v2/route:pkg",
        "//envoy/type:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/config/filter/accesslog/v2/accesslog.proto
================================================
syntax = "proto3";

package envoy.config.filter.accesslog.v2;

import "envoy/api/v2/core/base.proto";
import "envoy/api/v2/route/route_components.proto";
import "envoy/type/percent.proto";

import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.filter.accesslog.v2";
option java_outer_classname = "AccesslogProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package = "envoy.config.accesslog.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Common access log types]

message AccessLog {
  // The name of the access log implementation to instantiate. The name must
  // match a statically registered access log. Current built-in loggers include:
  //
  // #. "envoy.access_loggers.file"
  // #. "envoy.access_loggers.http_grpc"
  // #. "envoy.access_loggers.tcp_grpc"
  string name = 1;

  // Filter which is used to determine if the access log needs to be written.
  AccessLogFilter filter = 2;

  // Custom configuration that depends on the access log being instantiated. Built-in
  // configurations include:
  //
  // #. "envoy.access_loggers.file": :ref:`FileAccessLog
  //    `
  // #. "envoy.access_loggers.http_grpc": :ref:`HttpGrpcAccessLogConfig
  //    `
  // #. "envoy.access_loggers.tcp_grpc": :ref:`TcpGrpcAccessLogConfig
  //    `
  oneof config_type {
    google.protobuf.Struct config = 3 [deprecated = true];

    google.protobuf.Any typed_config = 4;
  }
}

// [#next-free-field: 12]
message AccessLogFilter {
  oneof filter_specifier {
    option (validate.required) = true;

    // Status code filter.
    StatusCodeFilter status_code_filter = 1;

    // Duration filter.
    DurationFilter duration_filter = 2;

    // Not health check filter.
    NotHealthCheckFilter not_health_check_filter = 3;

    // Traceable filter.
    TraceableFilter traceable_filter = 4;

    // Runtime filter.
    RuntimeFilter runtime_filter = 5;

    // And filter.
    AndFilter and_filter = 6;

    // Or filter.
    OrFilter or_filter = 7;

    // Header filter.
    HeaderFilter header_filter = 8;

    // Response flag filter.
    ResponseFlagFilter response_flag_filter = 9;

    // gRPC status filter.
    GrpcStatusFilter grpc_status_filter = 10;

    // Extension filter.
    ExtensionFilter extension_filter = 11;
  }
}

// Filter on an integer comparison.
message ComparisonFilter {
  enum Op {
    // =
    EQ = 0;

    // >=
    GE = 1;

    // <=
    LE = 2;
  }

  // Comparison operator.
  Op op = 1 [(validate.rules).enum = {defined_only: true}];

  // Value to compare against.
  api.v2.core.RuntimeUInt32 value = 2;
}

// Filters on HTTP response/status code.
message StatusCodeFilter {
  // Comparison.
  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];
}

// Filters on total request duration in milliseconds.
message DurationFilter {
  // Comparison.
  ComparisonFilter comparison = 1 [(validate.rules).message = {required: true}];
}

// Filters for requests that are not health check requests. A health check
// request is marked by the health check filter.
message NotHealthCheckFilter {
}

// Filters for requests that are traceable. See the tracing overview for more
// information on how a request becomes traceable.
message TraceableFilter {
}

// Filters for random sampling of requests.
message RuntimeFilter {
  // Runtime key to get an optional overridden numerator for use in the *percent_sampled* field.
  // If found in runtime, this value will replace the default numerator.
  string runtime_key = 1 [(validate.rules).string = {min_bytes: 1}];

  // The default sampling percentage. If not specified, defaults to 0% with denominator of 100.
  type.FractionalPercent percent_sampled = 2;

  // By default, sampling pivots on the header
  // :ref:`x-request-id` being present. If
  // :ref:`x-request-id` is present, the filter will
  // consistently sample across multiple hosts based on the runtime key value and the value
  // extracted from :ref:`x-request-id`. If it is
  // missing, or *use_independent_randomness* is set to true, the filter will randomly sample based
  // on the runtime key value alone. *use_independent_randomness* can be used for logging kill
  // switches within complex nested :ref:`AndFilter
  // ` and :ref:`OrFilter
  // ` blocks that are easier to reason about
  // from a probability perspective (i.e., setting to true will cause the filter to behave like
  // an independent random variable when composed within logical operator filters).
  bool use_independent_randomness = 3;
}

// Performs a logical “and” operation on the result of each filter in filters.
// Filters are evaluated sequentially and if one of them returns false, the
// filter returns false immediately.
message AndFilter {
  repeated AccessLogFilter filters = 1 [(validate.rules).repeated = {min_items: 2}];
}

// Performs a logical “or” operation on the result of each individual filter.
// Filters are evaluated sequentially and if one of them returns true, the
// filter returns true immediately.
message OrFilter {
  repeated AccessLogFilter filters = 2 [(validate.rules).repeated = {min_items: 2}];
}

// Filters requests based on the presence or value of a request header.
message HeaderFilter {
  // Only requests with a header which matches the specified HeaderMatcher will pass the filter
  // check.
  api.v2.route.HeaderMatcher header = 1 [(validate.rules).message = {required: true}];
}

// Filters requests that received responses with an Envoy response flag set.
// A list of the response flags can be found
// in the access log formatter :ref:`documentation`.
message ResponseFlagFilter {
  // Only responses with any of the flags listed in this field will be logged.
  // This field is optional. If it is not specified, then any response flag will pass
  // the filter check.
  repeated string flags = 1 [(validate.rules).repeated = {
    items {
      string {
        in: "LH"
        in: "UH"
        in: "UT"
        in: "LR"
        in: "UR"
        in: "UF"
        in: "UC"
        in: "UO"
        in: "NR"
        in: "DI"
        in: "FI"
        in: "RL"
        in: "UAEX"
        in: "RLSE"
        in: "DC"
        in: "URX"
        in: "SI"
        in: "IH"
        in: "DPE"
      }
    }
  }];
}

// Filters gRPC requests based on their response status. If a gRPC status is not provided, the
// filter will infer the status from the HTTP status code.
message GrpcStatusFilter {
  // Mirrors the canonical gRPC status codes (grpc-status 0-16).
  enum Status {
    OK = 0;
    CANCELED = 1;
    UNKNOWN = 2;
    INVALID_ARGUMENT = 3;
    DEADLINE_EXCEEDED = 4;
    NOT_FOUND = 5;
    ALREADY_EXISTS = 6;
    PERMISSION_DENIED = 7;
    RESOURCE_EXHAUSTED = 8;
    FAILED_PRECONDITION = 9;
    ABORTED = 10;
    OUT_OF_RANGE = 11;
    UNIMPLEMENTED = 12;
    INTERNAL = 13;
    UNAVAILABLE = 14;
    DATA_LOSS = 15;
    UNAUTHENTICATED = 16;
  }

  // Logs only responses that have any one of the gRPC statuses in this field.
  repeated Status statuses = 1 [(validate.rules).repeated = {items {enum {defined_only: true}}}];

  // If included and set to true, the filter will instead block all responses with a gRPC status or
  // inferred gRPC status enumerated in statuses, and allow all other responses.
  bool exclude = 2;
}

// Extension filter is statically registered at runtime.
message ExtensionFilter {
  // The name of the filter implementation to instantiate. The name must
  // match a statically registered filter.
  string name = 1;

  // Custom configuration that depends on the filter being instantiated.
  oneof config_type {
    google.protobuf.Struct config = 2 [deprecated = true];

    google.protobuf.Any typed_config = 3;
  }
}

================================================
FILE: api/envoy/config/filter/dubbo/router/v2alpha1/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/config/filter/dubbo/router/v2alpha1/router.proto
================================================
syntax = "proto3";

package envoy.config.filter.dubbo.router.v2alpha1;

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.config.filter.dubbo.router.v2alpha1";
option java_outer_classname = "RouterProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package =
    "envoy.extensions.filters.network.dubbo_proxy.router.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Router]
// Dubbo router :ref:`configuration overview `.

// Intentionally empty: presence of this config enables the Dubbo router filter.
message Router {
}

================================================
FILE: api/envoy/config/filter/fault/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/fault/v2/fault.proto ================================================ syntax = "proto3"; package envoy.config.filter.fault.v2; import "envoy/type/percent.proto"; import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.fault.v2"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.common.fault.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common fault injection types] // Delay specification is used to inject latency into the // HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. // [#next-free-field: 6] message FaultDelay { enum FaultDelayType { // Unused and deprecated. FIXED = 0; } // Fault delays are controlled via an HTTP header (if applicable). See the // :ref:`HTTP fault filter ` documentation for // more information. message HeaderDelay { } reserved 2; // Unused and deprecated. Will be removed in the next release. FaultDelayType type = 1 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; oneof fault_delay_secifier { option (validate.required) = true; // Add a fixed delay before forwarding the operation upstream. See // https://developers.google.com/protocol-buffers/docs/proto3#json for // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified // delay will be injected before a new request/operation. 
For TCP // connections, the proxying of the connection upstream will be delayed // for the specified period. This is required if type is FIXED. google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; // Fault delays are controlled via an HTTP header (if applicable). HeaderDelay header_delay = 5; } // The percentage of operations/connections/requests on which the delay will be injected. type.FractionalPercent percentage = 4; } // Describes a rate limit to be applied. message FaultRateLimit { // Describes a fixed/constant rate limit. message FixedLimit { // The limit supplied in KiB/s. uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; } // Rate limits are controlled via an HTTP header (if applicable). See the // :ref:`HTTP fault filter ` documentation for // more information. message HeaderLimit { } oneof limit_type { option (validate.required) = true; // A fixed rate limit. FixedLimit fixed_limit = 1; // Rate limits are controlled via an HTTP header (if applicable). HeaderLimit header_limit = 3; } // The percentage of operations/connections/requests on which the rate limit will be injected. type.FractionalPercent percentage = 2; } ================================================ FILE: api/envoy/config/filter/http/adaptive_concurrency/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/adaptive_concurrency/v2alpha/adaptive_concurrency.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.adaptive_concurrency.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/type/percent.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.adaptive_concurrency.v2alpha"; option java_outer_classname = "AdaptiveConcurrencyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.adaptive_concurrency.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Adaptive Concurrency] // Adaptive Concurrency Control :ref:`configuration overview // `. // [#extension: envoy.filters.http.adaptive_concurrency] // Configuration parameters for the gradient controller. message GradientControllerConfig { // Parameters controlling the periodic recalculation of the concurrency limit from sampled request // latencies. message ConcurrencyLimitCalculationParams { // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000. google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}]; // The period of time samples are taken to recalculate the concurrency limit. 
google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = { required: true gt {} }]; } // Parameters controlling the periodic minRTT recalculation. // [#next-free-field: 6] message MinimumRTTCalculationParams { // The time interval between recalculating the minimum request round-trip time. google.protobuf.Duration interval = 1 [(validate.rules).duration = { required: true gt {} }]; // The number of requests to aggregate/sample during the minRTT recalculation window before // updating. Defaults to 50. google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}]; // Randomized time delta that will be introduced to the start of the minRTT calculation window. // This is represented as a percentage of the interval duration. Defaults to 15%. // // Example: If the interval is 10s and the jitter is 15%, the next window will begin // somewhere in the range (10s - 11.5s). type.Percent jitter = 3; // The concurrency limit set while measuring the minRTT. Defaults to 3. google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}]; // Amount added to the measured minRTT to add stability to the concurrency limit during natural // variability in latency. This is expressed as a percentage of the measured value and can be // adjusted to allow more or less tolerance to the sampled latency values. // // Defaults to 25%. type.Percent buffer = 5; } // The percentile to use when summarizing aggregated samples. Defaults to p50. type.Percent sample_aggregate_percentile = 1; ConcurrencyLimitCalculationParams concurrency_limit_params = 2 [(validate.rules).message = {required: true}]; MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}]; } message AdaptiveConcurrency { oneof concurrency_controller_config { option (validate.required) = true; // Gradient concurrency control will be used. 
GradientControllerConfig gradient_controller_config = 1 [(validate.rules).message = {required: true}]; } // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the // message is unspecified, the filter will be enabled. api.v2.core.RuntimeFeatureFlag enabled = 2; } ================================================ FILE: api/envoy/config/filter/http/aws_lambda/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/aws_lambda/v2alpha/aws_lambda.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.aws_lambda.v2alpha; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.aws_lambda.v2alpha"; option java_outer_classname = "AwsLambdaProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.aws_lambda.v3"; option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AWS Lambda] // AWS Lambda :ref:`configuration overview `. // [#extension: envoy.filters.http.aws_lambda] // AWS Lambda filter config message Config { enum InvocationMode { // This is the more common mode of invocation, in which Lambda responds after it has completed the function. In // this mode the output of the Lambda function becomes the response of the HTTP request. SYNCHRONOUS = 0; // In this mode Lambda responds immediately but continues to process the function asynchronously. 
This mode can be // used to signal events for example. In this mode, Lambda responds with an acknowledgment that it received the // call which is translated to an HTTP 200 OK by the filter. ASYNCHRONOUS = 1; } // The ARN of the AWS Lambda to invoke when the filter is engaged // Must be in the following format: // arn:<partition>:lambda:<region>:<account-number>:function:<function-name> string arn = 1 [(validate.rules).string = {min_len: 1}]; // Whether to transform the request (headers and body) to a JSON payload or pass it as is. bool payload_passthrough = 2; // Determines the way to invoke the Lambda function. InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}]; } // Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda function or a different // version of the same Lambda depending on the route. message PerRouteConfig { Config invoke_config = 1; } ================================================ FILE: api/envoy/config/filter/http/aws_request_signing/v2alpha/BUILD ================================================ # DO NOT EDIT. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/aws_request_signing/v2alpha/aws_request_signing.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.aws_request_signing.v2alpha; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.aws_request_signing.v2alpha"; option java_outer_classname = "AwsRequestSigningProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.aws_request_signing.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AwsRequestSigning] // AwsRequestSigning :ref:`configuration overview `. // [#extension: envoy.filters.http.aws_request_signing] // Top level configuration for the AWS request signing filter. message AwsRequestSigning { // The `service namespace // `_ // of the HTTP endpoint. // // Example: s3 string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The `region `_ hosting the HTTP // endpoint. // // Example: us-west-2 string region = 2 [(validate.rules).string = {min_bytes: 1}]; // Indicates that before signing headers, the host header will be swapped with // this value. If not set or empty, the original host header value // will be used and no rewrite will happen. // // Note: this rewrite affects both signing and host header forwarding. However, this // option shouldn't be used with // :ref:`HCM host rewrite ` given that the // value set here would be used for signing whereas the value set in the HCM would be used // for host header forwarding which is not the desired outcome. 
string host_rewrite = 3; } ================================================ FILE: api/envoy/config/filter/http/buffer/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/buffer/v2/buffer.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.buffer.v2; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.buffer.v2"; option java_outer_classname = "BufferProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.buffer.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Buffer] // Buffer :ref:`configuration overview `. // [#extension: envoy.filters.http.buffer] message Buffer { reserved 2; // The maximum request size that the filter will buffer before the connection // manager will stop buffering and return a 413 response. google.protobuf.UInt32Value max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}]; } message BufferPerRoute { oneof override { option (validate.required) = true; // Disable the buffer filter for this particular vhost or route. bool disabled = 1 [(validate.rules).bool = {const: true}]; // Override the global configuration of the filter with this new config. 
Buffer buffer = 2 [(validate.rules).message = {required: true}]; } } ================================================ FILE: api/envoy/config/filter/http/cache/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/route:pkg", "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/cache/v2alpha/cache.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.cache.v2alpha; import "envoy/api/v2/route/route_components.proto"; import "envoy/type/matcher/string.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.cache.v2alpha"; option java_outer_classname = "CacheProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.cache.v3alpha"; option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Cache Filter] // [#extension: envoy.filters.http.cache] message CacheConfig { // [#not-implemented-hide:] // Modifies cache key creation by restricting which parts of the URL are included. message KeyCreatorParams { // If true, exclude the URL scheme from the cache key. Set to true if your origins always // produce the same response for http and https requests. bool exclude_scheme = 1; // If true, exclude the host from the cache key. Set to true if your origins' responses don't // ever depend on host. 
bool exclude_host = 2; // If *query_parameters_included* is nonempty, only query parameters matched // by one or more of its matchers are included in the cache key. Any other // query params will not affect cache lookup. repeated api.v2.route.QueryParameterMatcher query_parameters_included = 3; // If *query_parameters_excluded* is nonempty, query parameters matched by one // or more of its matchers are excluded from the cache key (even if also // matched by *query_parameters_included*), and will not affect cache lookup. repeated api.v2.route.QueryParameterMatcher query_parameters_excluded = 4; } // Config specific to the cache storage implementation. google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}]; // List of matching rules that defines allowed *Vary* headers. // // The *vary* response header holds a list of header names that affect the // contents of a response, as described by // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses. // // During insertion, *allowed_vary_headers* acts as an allowlist: if a // response's *vary* header mentions any header names that aren't matched by any rules in // *allowed_vary_headers*, that response will not be cached. // // During lookup, *allowed_vary_headers* controls what request headers will be // sent to the cache storage implementation. repeated type.matcher.StringMatcher allowed_vary_headers = 2; // [#not-implemented-hide:] // // // Modifies cache key creation by restricting which parts of the URL are included. KeyCreatorParams key_creator_params = 3; // [#not-implemented-hide:] // // // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache // storage implementation may have its own limit beyond which it will reject insertions). uint32 max_body_bytes = 4; } ================================================ FILE: api/envoy/config/filter/http/compressor/v2/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/compressor/v2/compressor.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.compressor.v2; import "envoy/api/v2/core/base.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.compressor.v2"; option java_outer_classname = "CompressorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.compressor.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Compressor] // [#next-free-field: 6] message Compressor { // Minimum response length, in bytes, which will trigger compression. The default value is 30. google.protobuf.UInt32Value content_length = 1; // Set of strings that allows specifying which mime-types yield compression; e.g., // application/json, text/html, etc. When this field is not defined, compression will be applied // to the following mime-types: "application/javascript", "application/json", // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml" // and their synonyms. repeated string content_type = 2; // If true, disables compression when the response contains an etag header. When it is false, the // filter will preserve weak etags and remove the ones that require strong validation. 
bool disable_on_etag_header = 3; // If true, removes accept-encoding from the request headers before dispatching it to the upstream // so that responses do not get compressed before reaching the filter. // .. attention: // // To avoid interfering with other compression filters in the same chain use this option in // the filter closest to the upstream. bool remove_accept_encoding_header = 4; // Runtime flag that controls whether the filter is enabled or not. If set to false, the // filter will operate as a pass-through filter. If not specified, defaults to enabled. api.v2.core.RuntimeFeatureFlag runtime_enabled = 5; } ================================================ FILE: api/envoy/config/filter/http/cors/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/cors/v2/cors.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.cors.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.cors.v2"; option java_outer_classname = "CorsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.cors.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Cors] // CORS Filter :ref:`configuration overview `. // [#extension: envoy.filters.http.cors] // Cors filter config. message Cors { } ================================================ FILE: api/envoy/config/filter/http/csrf/v2/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/csrf/v2/csrf.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.csrf.v2; import "envoy/api/v2/core/base.proto"; import "envoy/type/matcher/string.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.csrf.v2"; option java_outer_classname = "CsrfProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.csrf.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. // [#extension: envoy.filters.http.csrf] // CSRF filter config. message CsrfPolicy { // Specifies the % of requests for which the CSRF filter is enabled. // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests to filter. // // .. note:: // // This field defaults to 100/:ref:`HUNDRED // `. api.v2.core.RuntimeFractionalPercent filter_enabled = 1 [(validate.rules).message = {required: true}]; // Specifies that CSRF policies will be evaluated and tracked, but not enforced. // // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. 
// // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate // and track the request's *Origin* and *Destination* to determine if it's valid, but will not // enforce any policies. api.v2.core.RuntimeFractionalPercent shadow_enabled = 2; // Specifies additional source origins that will be allowed in addition to // the destination origin. // // More information on how this can be configured via runtime can be found // :ref:`here `. repeated type.matcher.StringMatcher additional_origins = 3; } ================================================ FILE: api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/dynamic_forward_proxy/v2alpha/dynamic_forward_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.dynamic_forward_proxy.v2alpha; import "envoy/config/common/dynamic_forward_proxy/v2alpha/dns_cache.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.dynamic_forward_proxy.v2alpha"; option java_outer_classname = "DynamicForwardProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.dynamic_forward_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamic forward proxy] // Configuration for the dynamic forward proxy HTTP 
filter. See the :ref:`architecture overview // ` for more information. // [#extension: envoy.filters.http.dynamic_forward_proxy] message FilterConfig { // The DNS cache configuration that the filter will attach to. Note this configuration must // match that of associated :ref:`dynamic forward proxy cluster configuration // `. common.dynamic_forward_proxy.v2alpha.DnsCacheConfig dns_cache_config = 1 [(validate.rules).message = {required: true}]; } // Per route Configuration for the dynamic forward proxy HTTP filter. message PerRouteConfig { oneof host_rewrite_specifier { // Indicates that before DNS lookup, the host header will be swapped with // this value. If not set or empty, the original host header value // will be used and no rewrite will happen. // // Note: this rewrite affects both DNS lookup and host header forwarding. However, this // option shouldn't be used with // :ref:`HCM host rewrite ` given that the // value set here would be used for DNS lookups whereas the value set in the HCM would be used // for host header forwarding which is not the desired outcome. string host_rewrite = 1 [(udpa.annotations.field_migrate).rename = "host_rewrite_literal"]; // Indicates that before DNS lookup, the host header will be swapped with // the value of this header. If not set or empty, the original host header // value will be used and no rewrite will happen. // // Note: this rewrite affects both DNS lookup and host header forwarding. However, this // option shouldn't be used with // :ref:`HCM host rewrite header ` // given that the value set here would be used for DNS lookups whereas the value set in the HCM // would be used for host header forwarding which is not the desired outcome. string auto_host_rewrite_header = 2 [(udpa.annotations.field_migrate).rename = "host_rewrite_header"]; } } ================================================ FILE: api/envoy/config/filter/http/dynamo/v2/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/dynamo/v2/dynamo.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.dynamo.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.dynamo.v2"; option java_outer_classname = "DynamoProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.dynamo.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamo] // Dynamo :ref:`configuration overview `. // [#extension: envoy.filters.http.dynamo] // Dynamo filter config. message Dynamo { } ================================================ FILE: api/envoy/config/filter/http/ext_authz/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2/core:pkg", "//envoy/type:pkg", "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/ext_authz/v2/ext_authz.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.ext_authz.v2; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/grpc_service.proto"; import "envoy/api/v2/core/http_uri.proto"; import "envoy/type/http_status.proto"; import "envoy/type/matcher/string.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.ext_authz.v2"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ext_authz.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] // [#next-free-field: 12] message ExtAuthz { // External authorization service configuration. oneof services { // gRPC service configuration (default timeout: 200ms). api.v2.core.GrpcService grpc_service = 1; // HTTP service configuration (default timeout: 200ms). HttpService http_service = 3; } // Changes filter's behaviour on errors: // // 1. When set to true, the filter will *accept* client request even if the communication with // the authorization service has failed, or if the authorization service has returned a HTTP 5xx // error. // // 2. 
When set to false, ext-authz will *reject* client requests and return a *Forbidden* // response if the communication with the authorization service has failed, or if the // authorization service has returned a HTTP 5xx error. // // Note that errors can be *always* tracked in the :ref:`stats // `. bool failure_mode_allow = 2; // Sets the package version the gRPC service should use. This is particularly // useful when transitioning from alpha to release versions assuming that both definitions are // semantically compatible. Deprecation note: This field is deprecated and should only be used for // version upgrade. See release notes for more details. bool use_alpha = 4 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // Enables filter to buffer the client request body and send it within the authorization request. // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization // request message indicating if the body data is partial. BufferSettings with_request_body = 5; // Clears route cache in order to allow the external authorization service to correctly affect // routing decisions. Filter clears all cached routes when: // // 1. The field is set to *true*. // // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. // // 3. At least one *authorization response header* is added to the client request, or is used for // altering another client request header. // bool clear_route_cache = 6; // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. type.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. 
// // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata // ` is set, // then the following will pass the jwt payload to the authorization server. // // .. code-block:: yaml // // metadata_context_namespaces: // - envoy.filters.http.jwt_authn // repeated string metadata_context_namespaces = 8; // Specifies if the filter is enabled. // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests to filter. // // If this field is not specified, the filter will be enabled for all requests. api.v2.core.RuntimeFractionalPercent filter_enabled = 9; // Specifies whether to deny the requests, when the filter is disabled. // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to determine whether to deny request for // filter protected path at filter disabling. If filter is disabled in // typed_per_filter_config for the path, requests will not be denied. // // If this field is not specified, all requests will be allowed when disabled. api.v2.core.RuntimeFeatureFlag deny_at_disable = 11; // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 10; } // Configuration for buffering the request data. message BufferSettings { // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow // `. uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. // The authorization request will be dispatched and no 413 HTTP error will be returned by the // filter. 
bool allow_partial_message = 2; } // HttpService is used for raw HTTP communication between the filter and the authorization service. // When configured, the filter will parse the client request and use these attributes to call the // authorization server. Depending on the response, the filter may reject or accept the client // request. Note that in any of these events, metadata can be added, removed or overridden by the // filter: // // *On authorization request*, a list of allowed request headers may be supplied. See // :ref:`allowed_headers // ` // for details. Additional headers metadata may be added to the authorization request. See // :ref:`headers_to_add // ` for // details. // // On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and // additional headers metadata may be added to the original client request. See // :ref:`allowed_upstream_headers // ` // for details. // // On other authorization response statuses, the filter will not allow traffic. Additional headers // metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers // ` // for details. // [#next-free-field: 9] message HttpService { reserved 3, 4, 5, 6; // Sets the HTTP server URI which the authorization requests must be sent to. api.v2.core.HttpUri server_uri = 1; // Sets a prefix to the value of authorization request header *Path*. string path_prefix = 2; // Settings used for controlling authorization request metadata. AuthorizationRequest authorization_request = 7; // Settings used for controlling authorization response metadata. AuthorizationResponse authorization_response = 8; } message AuthorizationRequest { // Authorization request will include the client request headers that have a correspondent match // in the :ref:`list `. Note that in addition to the // user's supplied matchers: // // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. // // 2. 
*Content-Length* will be set to 0 and the request to the authorization service will not have // a message body. However, the authorization request can include the buffered client request body // (controlled by :ref:`with_request_body // ` setting), // consequently the value of *Content-Length* of the authorization request reflects the size of // its payload. // type.matcher.ListStringMatcher allowed_headers = 1; // Sets a list of headers that will be included to the request to authorization service. Note that // client request of the same key will be overridden. repeated api.v2.core.HeaderValue headers_to_add = 2; } message AuthorizationResponse { // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the original client request. // Note that coexistent headers will be overridden. type.matcher.ListStringMatcher allowed_upstream_headers = 1; // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority // (Host)* will be in the response to the client. When a header is included in this list, *Path*, // *Status*, *Content-Length*, *WWW-Authenticate* and *Location* are automatically added. type.matcher.ListStringMatcher allowed_client_headers = 2; } // Extra settings on a per virtualhost/route/weighted-cluster level. message ExtAuthzPerRoute { oneof override { option (validate.required) = true; // Disable the ext auth filter for this particular vhost or route. // If disabled is specified in multiple per-filter-configs, the most specific one will be used. bool disabled = 1 [(validate.rules).bool = {const: true}]; // Check request settings for this route. CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; } } // Extra settings for the check request. 
You can use this to provide extra context for the // external authorization server on specific virtual hosts \ routes. For example, adding a context // extension on the virtual host level can give the ext-authz server information on what virtual // host is used without needing to parse the host header. If CheckSettings is specified in multiple // per-filter-configs, they will be merged in order, and the result will be used. message CheckSettings { // Context extensions to set on the CheckRequest's // :ref:`AttributeContext.context_extensions` // // Merge semantics for this field are such that keys from more specific configs override. // // .. note:: // // These settings are only applied to a filter configured with a // :ref:`grpc_service`. map<string, string> context_extensions = 1; } ================================================ FILE: api/envoy/config/filter/http/fault/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/route:pkg", "//envoy/config/filter/fault/v2:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/fault/v2/fault.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.fault.v2; import "envoy/api/v2/route/route_components.proto"; import "envoy/config/filter/fault/v2/fault.proto"; import "envoy/type/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.fault.v2"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.fault.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. // [#extension: envoy.filters.http.fault] message FaultAbort { // Fault aborts are controlled via an HTTP header (if applicable). See the // :ref:`HTTP fault filter ` documentation for // more information. message HeaderAbort { } reserved 1; oneof error_type { option (validate.required) = true; // HTTP status code to use to abort the HTTP request. uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; // Fault aborts are controlled via an HTTP header (if applicable). HeaderAbort header_abort = 4; } // The percentage of requests/operations/connections that will be aborted with the error code // provided. type.FractionalPercent percentage = 3; } // [#next-free-field: 14] message HTTPFault { // If specified, the filter will inject delays based on the values in the // object. 
filter.fault.v2.FaultDelay delay = 1; // If specified, the filter will abort requests based on the values in // the object. At least *abort* or *delay* must be specified. FaultAbort abort = 2; // Specifies the name of the (destination) upstream cluster that the // filter should match on. Fault injection will be restricted to requests // bound to the specific upstream cluster. string upstream_cluster = 3; // Specifies a set of headers that the filter should match on. The fault // injection filter can be applied selectively to requests that match a set of // headers specified in the fault filter config. The chances of actual fault // injection further depend on the value of the :ref:`percentage // ` field. // The filter will check the request's headers against all the specified // headers in the filter config. A match will happen if all the headers in the // config are present in the request with the same values (or based on // presence if the *value* field is not in the config). repeated api.v2.route.HeaderMatcher headers = 4; // Faults are injected for the specified list of downstream hosts. If this // setting is not set, faults are injected for all downstream nodes. // Downstream node name is taken from :ref:`the HTTP // x-envoy-downstream-service-node // ` header and compared // against downstream_nodes list. repeated string downstream_nodes = 5; // The maximum number of faults that can be active at a single time via the configured fault // filter. Note that because this setting can be overridden at the route level, it's possible // for the number of active faults to be greater than this value (if injected via a different // route). If not specified, defaults to unlimited. This setting can be overridden via // `runtime ` and any faults that are not injected // due to overflow will be indicated via the `faults_overflow // ` stat. // // .. attention:: // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy // limit. 
It's possible for the number of active faults to rise slightly above the configured // amount due to the implementation details. google.protobuf.UInt32Value max_active_faults = 6; // The response rate limit to be applied to the response body of the stream. When configured, // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent // ` runtime key. // // .. attention:: // This is a per-stream limit versus a connection level limit. This means that concurrent streams // will each get an independent limit. filter.fault.v2.FaultRateLimit response_rate_limit = 7; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.delay.fixed_delay_percent string delay_percent_runtime = 8; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.abort.abort_percent string abort_percent_runtime = 9; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.delay.fixed_duration_ms string delay_duration_runtime = 10; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.abort.http_status string abort_http_status_runtime = 11; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.max_active_faults string max_active_faults_runtime = 12; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.rate_limit.response_percent string response_rate_limit_percent_runtime = 13; } ================================================ FILE: api/envoy/config/filter/http/grpc_http1_bridge/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/grpc_http1_bridge/v2/config.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.grpc_http1_bridge.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_bridge.v2"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_http1_bridge.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC HTTP/1.1 Bridge] // gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. // [#extension: envoy.filters.http.grpc_http1_bridge] // gRPC HTTP/1.1 Bridge filter config. message Config { } ================================================ FILE: api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1/config.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge] // gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview // `. // [#extension: envoy.filters.http.grpc_http1_reverse_bridge] // gRPC reverse bridge filter configuration message FilterConfig { // The content-type to pass to the upstream when the gRPC bridge filter is applied. // The filter will also validate that the upstream responds with the same content type. string content_type = 1 [(validate.rules).string = {min_bytes: 1}]; // If true, Envoy will assume that the upstream doesn't understand gRPC frames and // strip the gRPC frame from the request, and add it back in to the response. This will // hide the gRPC semantics from the upstream, allowing it to receive and respond with a // simple binary encoded protobuf. bool withhold_grpc_frames = 2; } // gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level. message FilterConfigPerRoute { // If true, disables gRPC reverse bridge filter for this particular vhost or route. 
// If disabled is specified in multiple per-filter-configs, the most specific one will be used. bool disabled = 1; } ================================================ FILE: api/envoy/config/filter/http/grpc_stats/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/grpc_stats/v2alpha/config.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.grpc_stats.v2alpha; import "envoy/api/v2/core/grpc_method_list.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_stats.v2alpha"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_stats.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC statistics] gRPC statistics filter // :ref:`configuration overview `. // [#extension: envoy.filters.http.grpc_stats] // gRPC statistics filter configuration message FilterConfig { // If true, the filter maintains a filter state object with the request and response message // counts. bool emit_filter_state = 1; oneof per_method_stat_specifier { // If set, specifies an allowlist of service/methods that will have individual stats // emitted for them. Any call that does not match the allowlist will be counted // in a stat with no method specifier: `cluster..grpc.*`. 
api.v2.core.GrpcMethodList individual_method_stats_allowlist = 2; // If set to true, emit stats for all service/method names. // // If set to false, emit stats for all service/message types to the same stats without including // the service/method in the name, with prefix `cluster..grpc`. This can be useful if // service/method granularity is not needed, or if each cluster only receives a single method. // // .. attention:: // This option is only safe if all clients are trusted. If this option is enabled // with untrusted clients, the clients could cause unbounded growth in the number of stats in // Envoy, using unbounded memory and potentially slowing down stats pipelines. // // .. attention:: // If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the // behavior will default to `stats_for_all_methods=true`. This default value is deprecated, // and in a future release, if neither field is set, it will default to // `stats_for_all_methods=false` in order to be safe by default. This behavior can be // controlled with runtime override // `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`. google.protobuf.BoolValue stats_for_all_methods = 3; } } // gRPC statistics filter state object in protobuf form. message FilterObject { // Count of request messages in the request stream. uint64 request_message_count = 1; // Count of response messages in the response stream. uint64 response_message_count = 2; } ================================================ FILE: api/envoy/config/filter/http/grpc_web/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/grpc_web/v2/grpc_web.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.grpc_web.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.grpc_web.v2"; option java_outer_classname = "GrpcWebProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_web.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC Web] // gRPC Web :ref:`configuration overview `. // [#extension: envoy.filters.http.grpc_web] // gRPC Web filter config. message GrpcWeb { } ================================================ FILE: api/envoy/config/filter/http/gzip/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/compressor/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/gzip/v2/gzip.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.gzip.v2; import "envoy/config/filter/http/compressor/v2/compressor.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.gzip.v2"; option java_outer_classname = "GzipProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.gzip.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Gzip] // Gzip :ref:`configuration overview `. // [#extension: envoy.filters.http.gzip] // [#next-free-field: 11] message Gzip { enum CompressionStrategy { DEFAULT = 0; FILTERED = 1; HUFFMAN = 2; RLE = 3; } message CompressionLevel { enum Enum { DEFAULT = 0; BEST = 1; SPEED = 2; } } // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values // use more memory, but are faster and produce better compression results. The default value is 5. google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; // Minimum response length, in bytes, which will trigger compression. The default value is 30. // .. attention: // // **This field is deprecated**. Set the `compressor` field instead. google.protobuf.UInt32Value content_length = 2 [deprecated = true]; // A value used for selecting the zlib compression level. This setting will affect speed and // amount of compression applied to the content. 
"BEST" provides higher compression at the cost of // higher latency, "SPEED" provides lower compression with minimum impact on response time. // "DEFAULT" provides an optimal result between speed and compression. This field will be set to // "DEFAULT" if not specified. CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; // A value used for selecting the zlib compression strategy which is directly related to the // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though // there are situations which changing this parameter might produce better results. For example, // run-length encoding (RLE) is typically used when the content is known for having sequences // which same data occurs many consecutive times. For more information about each strategy, please // refer to zlib manual. CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; // Set of strings that allows specifying which mime-types yield compression; e.g., // application/json, text/html, etc. When this field is not defined, compression will be applied // to the following mime-types: "application/javascript", "application/json", // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml". // .. attention: // // **This field is deprecated**. Set the `compressor` field instead. repeated string content_type = 6 [deprecated = true]; // If true, disables compression when the response contains an etag header. When it is false, the // filter will preserve weak etags and remove the ones that require strong validation. // .. attention: // // **This field is deprecated**. Set the `compressor` field instead. bool disable_on_etag_header = 7 [deprecated = true]; // If true, removes accept-encoding from the request headers before dispatching it to the upstream // so that responses do not get compressed before reaching the filter. // .. 
attention: // // **This field is deprecated**. Set the `compressor` field instead. bool remove_accept_encoding_header = 8 [deprecated = true]; // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. // Larger window results in better compression at the expense of memory usage. The default is 12 // which will produce a 4096 bytes window. For more details about this parameter, please refer to // zlib manual > deflateInit2. google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; // Set of configuration parameters common for all compression filters. If this field is set then // the fields `content_length`, `content_type`, `disable_on_etag_header` and // `remove_accept_encoding_header` are ignored. compressor.v2.Compressor compressor = 10; } ================================================ FILE: api/envoy/config/filter/http/header_to_metadata/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.header_to_metadata.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.header_to_metadata.v2"; option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.header_to_metadata.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Header-To-Metadata Filter] // // The configuration for transforming headers into metadata. This is useful // for matching load balancer subsets, logging, etc. // // Header to Metadata :ref:`configuration overview `. // [#extension: envoy.filters.http.header_to_metadata] message Config { enum ValueType { STRING = 0; NUMBER = 1; // The value is a serialized `protobuf.Value // `_. PROTOBUF_VALUE = 2; } // ValueEncode defines the encoding algorithm. enum ValueEncode { // The value is not encoded. NONE = 0; // The value is encoded in `Base64 `_. // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the // non-ASCII characters in the header. BASE64 = 1; } // [#next-free-field: 6] message KeyValuePair { // The namespace — if this is empty, the filter's namespace will be used. string metadata_namespace = 1; // The key to use within the namespace. string key = 2 [(validate.rules).string = {min_bytes: 1}]; // The value to pair with the given key. 
// // When used for a `on_header_present` case, if value is non-empty it'll be used // instead of the header value. If both are empty, no metadata is added. // // When used for a `on_header_missing` case, a non-empty value must be provided // otherwise no metadata is added. string value = 3; // The value's type — defaults to string. ValueType type = 4; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. ValueEncode encode = 5; } // A Rule defines what metadata to apply when a header is present or missing. message Rule { // The header that triggers this rule — required. string header = 1 [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If the header is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead // of the header value. KeyValuePair on_header_present = 2; // If the header is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu // of the missing header value. KeyValuePair on_header_missing = 3; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. bool remove = 4; } // The list of rules to apply to requests. repeated Rule request_rules = 1; // The list of rules to apply to responses. repeated Rule response_rules = 2; } ================================================ FILE: api/envoy/config/filter/http/health_check/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/route:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/health_check/v2/health_check.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.health_check.v2; import "envoy/api/v2/route/route_components.proto"; import "envoy/type/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.health_check.v2"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.health_check.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. // [#extension: envoy.filters.http.health_check] // [#next-free-field: 6] message HealthCheck { reserved 2; // Specifies whether the filter operates in pass through mode or not. google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; // If operating in pass through mode, the amount of time in milliseconds // that the filter should cache the upstream response. google.protobuf.Duration cache_time = 3; // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. // // .. note:: // // This value is interpreted as an integer by truncating, so 12.50% will be calculated // as if it were 12%. 
map<string, type.Percent> cluster_min_healthy_percentages = 4; // Specifies a set of health check request headers to match on. The health check filter will // check a request’s headers against all the specified headers. To specify the health check // endpoint, set the ``:path`` header to match on. repeated api.v2.route.HeaderMatcher headers = 5; } ================================================ FILE: api/envoy/config/filter/http/ip_tagging/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/ip_tagging/v2/ip_tagging.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.ip_tagging.v2; import "envoy/api/v2/core/address.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.ip_tagging.v2"; option java_outer_classname = "IpTaggingProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ip_tagging.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: IP tagging] // IP tagging :ref:`configuration overview `. // [#extension: envoy.filters.http.ip_tagging] message IPTagging { // The type of requests the filter should apply to. The supported types // are internal, external or both. The // :ref:`x-forwarded-for` header is // used to determine if a request is internal and will result in // :ref:`x-envoy-internal` // being set. The filter defaults to both, and it will apply to all request types. 
enum RequestType { // Both external and internal requests will be tagged. This is the default value. BOTH = 0; // Only internal requests will be tagged. INTERNAL = 1; // Only external requests will be tagged. EXTERNAL = 2; } // Supplies the IP tag name and the IP address subnets. message IPTag { // Specifies the IP tag name to apply. string ip_tag_name = 1; // A list of IP address subnets that will be tagged with // ip_tag_name. Both IPv4 and IPv6 are supported. repeated api.v2.core.CidrRange ip_list = 2; } // The type of request the filter should apply to. RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] // The set of IP tags for the filter. repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/config/filter/http/jwt_authn/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/api/v2/route:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/jwt_authn/v2alpha/README.md ================================================ # JWT Authentication HTTP filter config ## Overview 1. The proto file in this folder defines an HTTP filter config for "jwt_authn" filter. 2. This filter will verify the JWT in the HTTP request as: - The signature should be valid - JWT should not be expired - Issuer and audiences are valid and specified in the filter config. 3. [JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. 
It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter.
4. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message.

## The locations to extract JWT

JWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP header:
```
Authorization: Bearer <token>
```
The next default location is in the query parameter as:
```
?access_token=<token>
```

If a custom location is desired, `from_headers` or `from_params` can be used to specify custom locations to extract JWT.

## HTTP header to pass successfully verified JWT

If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in `forward_payload_header` field. Its value is base64url-encoded JWT payload in JSON.

## Further header options

In addition to the `name` field, which specifies the HTTP header name, the `from_headers` section can specify an optional `value_prefix` value, as in:

```yaml
from_headers:
  - name: bespoke
    value_prefix: jwt_value
```

The above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following the tag `jwt_value`. Any non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`, and `.`) will be skipped, and all following, contiguous, JWT-legal chars will be taken as the JWT. This means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`:

```text
bespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk
bespoke: {"jwt_value": "eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk"}
bespoke: beta:true,jwt_value:"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk",trace=1234
```

The header `name` may be `Authorization`.

The `value_prefix` must match exactly, i.e., case-sensitively. If the `value_prefix` is not found, the header is skipped: not considered as a source for a JWT token.
If there are no JWT-legal characters after the `value_prefix`, the entire string after it is taken to be the JWT token. This is unlikely to succeed; the error will be reported by the JWT parser.

================================================
FILE: api/envoy/config/filter/http/jwt_authn/v2alpha/config.proto
================================================
syntax = "proto3";

package envoy.config.filter.http.jwt_authn.v2alpha;

import "envoy/api/v2/core/base.proto";
import "envoy/api/v2/core/http_uri.proto";
import "envoy/api/v2/route/route_components.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/empty.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.filter.http.jwt_authn.v2alpha";
option java_outer_classname = "ConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.jwt_authn.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: JWT Authentication]
// JWT Authentication :ref:`configuration overview <config_http_filters_jwt_authn>`.
// [#extension: envoy.filters.http.jwt_authn]

// Please see following for JWT authentication flow:
//
// * `JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_
// * `The OAuth 2.0 Authorization Framework <https://tools.ietf.org/html/rfc6749>`_
// * `OpenID Connect <http://openid.net/connect>`_
//
// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies:
//
// * issuer: the principal that issues the JWT. It has to match the one from the token.
// * allowed audiences: the ones in the token have to be listed here.
// * how to fetch public key JWKS to verify the token signature.
// * how to extract JWT token in the request.
// * how to pass successfully verified token payload.
//
// Example:
//
// ..
code-block:: yaml // // issuer: https://example.com // audiences: // - bookstore_android.apps.googleusercontent.com // - bookstore_web.apps.googleusercontent.com // remote_jwks: // http_uri: // uri: https://example.com/.well-known/jwks.json // cluster: example_jwks_cluster // cache_duration: // seconds: 300 // // [#next-free-field: 10] message JwtProvider { // Specify the `principal `_ that issued // the JWT, usually a URL or an email address. // // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // string issuer = 1 [(validate.rules).string = {min_bytes: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, // will not check audiences in the token. // // Example: // // .. code-block:: yaml // // audiences: // - bookstore_android.apps.googleusercontent.com // - bookstore_web.apps.googleusercontent.com // repeated string audiences = 2; // `JSON Web Key Set (JWKS) `_ is needed to // validate signature of a JWT. This field specifies where to fetch JWKS. oneof jwks_source_specifier { option (validate.required) = true; // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP // URI and how the fetched JWKS should be cached. // // Example: // // .. code-block:: yaml // // remote_jwks: // http_uri: // uri: https://www.googleapis.com/oauth2/v1/certs // cluster: jwt.www.googleapis.com|443 // cache_duration: // seconds: 300 // RemoteJwks remote_jwks = 3; // JWKS is in local data source. It could be either in a local file or embedded in the // inline_string. // // Example: local file // // .. code-block:: yaml // // local_jwks: // filename: /etc/envoy/jwks/jwks1.txt // // Example: inline_string // // .. code-block:: yaml // // local_jwks: // inline_string: ACADADADADA // api.v2.core.DataSource local_jwks = 4; } // If false, the JWT is removed in the request after a success verification. 
If true, the JWT is
  // not removed in the request. Default value is false.
  bool forward = 5;

  // Two fields below define where to extract the JWT from an HTTP request.
  //
  // If no explicit location is specified, the following default locations are tried in order:
  //
  // 1. The Authorization header using the `Bearer schema
  //    <https://tools.ietf.org/html/rfc6750#section-2.1>`_. Example::
  //
  //    Authorization: Bearer <token>.
  //
  // 2. `access_token <https://tools.ietf.org/html/rfc6750#section-2.3>`_ query parameter.
  //
  // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations
  // its provider specified or from the default locations.
  //
  // Specify the HTTP headers to extract JWT token. For examples, following config:
  //
  // .. code-block:: yaml
  //
  //   from_headers:
  //   - name: x-goog-iap-jwt-assertion
  //
  // can be used to extract token from header::
  //
  //   ``x-goog-iap-jwt-assertion: <JWT>``.
  //
  repeated JwtHeader from_headers = 6;

  // JWT is sent in a query parameter. `jwt_params` represents the query parameter names.
  //
  // For example, if config is:
  //
  // .. code-block:: yaml
  //
  //   from_params:
  //   - jwt_token
  //
  // The JWT format in query parameter is::
  //
  //    /path?jwt_token=<jwt_token>
  //
  repeated string from_params = 7;

  // This field specifies the header name to forward a successfully verified JWT payload to the
  // backend. The forwarded data is::
  //
  //    base64url_encoded(jwt_payload_in_JSON)
  //
  // If it is not specified, the payload will not be forwarded.
  string forward_payload_header = 8;

  // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata
  // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn**
  // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields*
  // and the value is the *protobuf::Struct* converted from JWT JSON payload.
  //
  // For example, if payload_in_metadata is *my_payload*:
  //
  // ..
code-block:: yaml // // envoy.filters.http.jwt_authn: // my_payload: // iss: https://example.com // sub: test@example.com // aud: https://example.com // exp: 1501281058 // string payload_in_metadata = 9; } // This message specifies how to fetch JWKS from remote and how to cache it. message RemoteJwks { // The HTTP URI to fetch the JWKS. For example: // // .. code-block:: yaml // // http_uri: // uri: https://www.googleapis.com/oauth2/v1/certs // cluster: jwt.www.googleapis.com|443 // api.v2.core.HttpUri http_uri = 1; // Duration after which the cached JWKS should be expired. If not specified, default cache // duration is 5 minutes. google.protobuf.Duration cache_duration = 2; } // This message specifies a header location to extract JWT token. message JwtHeader { // The HTTP header name. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // The value prefix. The value format is "value_prefix" // For example, for "Authorization: Bearer ", value_prefix="Bearer " with a space at the // end. string value_prefix = 2; } // Specify a required provider with audiences. message ProviderWithAudiences { // Specify a required provider name. string provider_name = 1; // This field overrides the one specified in the JwtProvider. repeated string audiences = 2; } // This message specifies a Jwt requirement. An empty message means JWT verification is not // required. Here are some config examples: // // .. 
code-block:: yaml // // # Example 1: not required with an empty message // // # Example 2: require A // provider_name: provider-A // // # Example 3: require A or B // requires_any: // requirements: // - provider_name: provider-A // - provider_name: provider-B // // # Example 4: require A and B // requires_all: // requirements: // - provider_name: provider-A // - provider_name: provider-B // // # Example 5: require A and (B or C) // requires_all: // requirements: // - provider_name: provider-A // - requires_any: // requirements: // - provider_name: provider-B // - provider_name: provider-C // // # Example 6: require A or (B and C) // requires_any: // requirements: // - provider_name: provider-A // - requires_all: // requirements: // - provider_name: provider-B // - provider_name: provider-C // // # Example 7: A is optional (if token from A is provided, it must be valid, but also allows // missing token.) // requires_any: // requirements: // - provider_name: provider-A // - allow_missing: {} // // # Example 8: A is optional and B is required. // requires_all: // requirements: // - requires_any: // requirements: // - provider_name: provider-A // - allow_missing: {} // - provider_name: provider-B // // [#next-free-field: 7] message JwtRequirement { oneof requires_type { // Specify a required provider name. string provider_name = 1; // Specify a required provider with audiences. ProviderWithAudiences provider_and_audiences = 2; // Specify list of JwtRequirement. Their results are OR-ed. // If any one of them passes, the result is passed. JwtRequirementOrList requires_any = 3; // Specify list of JwtRequirement. Their results are AND-ed. // All of them must pass, if one of them fails or missing, it fails. JwtRequirementAndList requires_all = 4; // The requirement is always satisfied even if JWT is missing or the JWT // verification fails. 
A typical usage is: this filter is used to only verify // JWTs and pass the verified JWT payloads to another filter, the other filter // will make decision. In this mode, all JWT tokens will be verified. google.protobuf.Empty allow_missing_or_failed = 5; // The requirement is satisfied if JWT is missing, but failed if JWT is // presented but invalid. Similar to allow_missing_or_failed, this is used // to only verify JWTs and pass the verified payload to another filter. The // different is this mode will reject requests with invalid tokens. google.protobuf.Empty allow_missing = 6; } } // This message specifies a list of RequiredProvider. // Their results are OR-ed; if any one of them passes, the result is passed message JwtRequirementOrList { // Specify a list of JwtRequirement. repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; } // This message specifies a list of RequiredProvider. // Their results are AND-ed; all of them must pass, if one of them fails or missing, it fails. message JwtRequirementAndList { // Specify a list of JwtRequirement. repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}]; } // This message specifies a Jwt requirement for a specific Route condition. // Example 1: // // .. code-block:: yaml // // - match: // prefix: /healthz // // In above example, "requires" field is empty for /healthz prefix match, // it means that requests matching the path prefix don't require JWT authentication. // // Example 2: // // .. code-block:: yaml // // - match: // prefix: / // requires: { provider_name: provider-A } // // In above example, all requests matched the path prefix require jwt authentication // from "provider-A". message RequirementRule { // The route matching parameter. Only when the match is satisfied, the "requires" field will // apply. // // For example: following match will match all requests. // // .. 
code-block:: yaml
  //
  //    match:
  //      prefix: /
  //
  api.v2.route.RouteMatch match = 1 [(validate.rules).message = {required: true}];

  // Specify a Jwt Requirement. Please see the detailed comment in message JwtRequirement.
  JwtRequirement requires = 2;
}

// This message specifies Jwt requirements based on stream_info.filterState.
// This FilterState should use `Router::StringAccessor` object to set a string value.
// Other HTTP filters can use it to specify Jwt requirements dynamically.
//
// Example:
//
// .. code-block:: yaml
//
//    name: jwt_selector
//    requires:
//      issuer_1:
//        provider_name: issuer1
//      issuer_2:
//        provider_name: issuer2
//
// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request,
// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify.
message FilterStateRule {
  // The filter state name to retrieve the `Router::StringAccessor` object.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // A map of string keys to requirements. The string key is the string value
  // in the FilterState with the name specified in the *name* field above.
  map<string, JwtRequirement> requires = 3;
}

// This is the Envoy HTTP filter config for JWT authentication.
//
// For example:
//
// .. code-block:: yaml
//
//    providers:
//      provider1:
//        issuer: issuer1
//        audiences:
//        - audience1
//        - audience2
//        remote_jwks:
//          http_uri:
//            uri: https://example.com/.well-known/jwks.json
//            cluster: example_jwks_cluster
//      provider2:
//        issuer: issuer2
//        local_jwks:
//          inline_string: jwks_string
//
//    rules:
//      # No jwt verification is required for /health path
//      - match:
//          prefix: /health
//
//      # Jwt verification for provider1 is required for path prefixed with "prefix"
//      - match:
//          prefix: /prefix
//        requires:
//          provider_name: provider1
//
//      # Jwt verification for either provider1 or provider2 is required for all other requests.
// - match:
//     prefix: /
//   requires:
//     requires_any:
//       requirements:
//       - provider_name: provider1
//       - provider_name: provider2
//
message JwtAuthentication {
  // Map of provider names to JwtProviders.
  //
  // .. code-block:: yaml
  //
  //    providers:
  //      provider1:
  //        issuer: issuer1
  //        audiences:
  //        - audience1
  //        - audience2
  //        remote_jwks:
  //          http_uri:
  //            uri: https://example.com/.well-known/jwks.json
  //            cluster: example_jwks_cluster
  //      provider2:
  //        issuer: issuer2
  //        local_jwks:
  //          inline_string: jwks_string
  //
  map<string, JwtProvider> providers = 1;

  // Specifies requirements based on the route matches. The first matched requirement will be
  // applied. If there are overlapped match conditions, please put the most specific match first.
  //
  // Examples
  //
  // .. code-block:: yaml
  //
  //    rules:
  //      - match:
  //          prefix: /healthz
  //      - match:
  //          prefix: /baz
  //        requires:
  //          provider_name: provider1
  //      - match:
  //          prefix: /foo
  //        requires:
  //          requires_any:
  //            requirements:
  //            - provider_name: provider1
  //            - provider_name: provider2
  //      - match:
  //          prefix: /bar
  //        requires:
  //          requires_all:
  //            requirements:
  //            - provider_name: provider1
  //            - provider_name: provider2
  //
  repeated RequirementRule rules = 2;

  // This message specifies Jwt requirements based on stream_info.filterState.
  // Other HTTP filters can use it to specify Jwt requirements dynamically.
  // The *rules* field above is checked first, if it could not find any matches,
  // check this one.
  FilterStateRule filter_state_rules = 3;

  // When set to true, bypass the `CORS preflight request
  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT
  // requirements specified in the rules.
  bool bypass_cors_preflight = 4;
}

================================================
FILE: api/envoy/config/filter/http/lua/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/lua/v2/lua.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.lua.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.lua.v2"; option java_outer_classname = "LuaProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.lua.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Lua] // Lua :ref:`configuration overview `. // [#extension: envoy.filters.http.lua] message Lua { // The Lua code that Envoy will execute. This can be a very small script that // further loads code from disk if desired. Note that if JSON configuration is used, the code must // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line // strings so complex scripts can be easily expressed inline in the configuration. string inline_code = 1 [(validate.rules).string = {min_bytes: 1}]; } ================================================ FILE: api/envoy/config/filter/http/on_demand/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/on_demand/v2/on_demand.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.on_demand.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.on_demand.v2"; option java_outer_classname = "OnDemandProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.on_demand.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: OnDemand] // IP tagging :ref:`configuration overview `. // [#extension: envoy.filters.http.on_demand] message OnDemand { } ================================================ FILE: api/envoy/config/filter/http/original_src/v2alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/original_src/v2alpha1/original_src.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.original_src.v2alpha1; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.original_src.v2alpha1"; option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.original_src.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. // The Original Src filter binds upstream connections to the original source address determined // for the request. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. // [#extension: envoy.filters.http.original_src] message OriginalSrc { // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to // ensure that non-local addresses may be routed back through envoy when binding to the original // source address. The option will not be applied if the mark is 0. uint32 mark = 1; } ================================================ FILE: api/envoy/config/filter/http/rate_limit/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/ratelimit/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/rate_limit/v2/rate_limit.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.rate_limit.v2; import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.rate_limit.v2"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.ratelimit.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.http.ratelimit] // [#next-free-field: 8] message RateLimit { // The rate limit domain to use when calling the rate limit service. string domain = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies the rate limit configurations to be applied with the same // stage number. If not set, the default stage number is 0. // // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The type of requests the filter should apply to. The supported // types are *internal*, *external* or *both*. A request is considered internal if // :ref:`x-envoy-internal` is set to true. If // :ref:`x-envoy-internal` is not set or false, a // request is considered external. The filter defaults to *both*, and it will apply to all request // types. 
string request_type = 3 [(validate.rules).string = {in: "internal" in: "external" in: "both" in: ""}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. google.protobuf.Duration timeout = 4; // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. // Defaults to false. bool failure_mode_deny = 5; // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The // HTTP code will be 200 for a gRPC response. bool rate_limited_as_resource_exhausted = 6; // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. ratelimit.v2.RateLimitServiceConfig rate_limit_service = 7 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/filter/http/rbac/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/rbac/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/rbac/v2/rbac.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.rbac.v2; import "envoy/config/rbac/v2/rbac.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.rbac.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. // [#extension: envoy.filters.http.rbac] // RBAC filter config. message RBAC { // Specify the RBAC rules to be applied globally. // If absent, no enforcing RBAC policy will be applied. config.rbac.v2.RBAC rules = 1; // Shadow rules are not enforced by the filter (i.e., returning a 403) // but will emit stats and logs and can be used for rule testing. // If absent, no shadow RBAC policy will be applied. config.rbac.v2.RBAC shadow_rules = 2; } message RBACPerRoute { reserved 1; // Override the global configuration of the filter with this new config. // If absent, the global RBAC policy will be disabled for this route. RBAC rbac = 2; } ================================================ FILE: api/envoy/config/filter/http/router/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/accesslog/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/router/v2/router.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.router.v2; import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.router.v2"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.router.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Router] // Router :ref:`configuration overview `. // [#extension: envoy.filters.http.router] // [#next-free-field: 7] message Router { // Whether the router generates dynamic cluster statistics. Defaults to // true. Can be disabled in high performance scenarios. google.protobuf.BoolValue dynamic_stats = 1; // Whether to start a child span for egress routed calls. This can be // useful in scenarios where other filters (auth, ratelimit, etc.) make // outbound calls and have child spans rooted at the same ingress // parent. Defaults to false. bool start_child_span = 2; // Configuration for HTTP upstream logs emitted by the router. Upstream logs // are configured in the same way as access logs, but each log entry represents // an upstream request. Presuming retries are configured, multiple upstream // requests may be made for each downstream (inbound) request. repeated accesslog.v2.AccessLog upstream_log = 3; // Do not add any additional *x-envoy-* headers to requests or responses. 
This // only affects the :ref:`router filter generated *x-envoy-* headers // `, other Envoy filters and the HTTP // connection manager may continue to set *x-envoy-* headers. bool suppress_envoy_headers = 4; // Specifies a list of HTTP headers to strictly validate. Envoy will reject a // request and respond with HTTP status 400 if the request contains an invalid // value for any of the headers listed in this field. Strict header checking // is only supported for the following headers: // // Value must be a ','-delimited list (i.e. no spaces) of supported retry // policy values: // // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on` // * :ref:`config_http_filters_router_x-envoy-retry-on` // // Value must be an integer: // // * :ref:`config_http_filters_router_x-envoy-max-retries` // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms` // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` repeated string strict_check_headers = 5 [(validate.rules).repeated = { items { string { in: "x-envoy-upstream-rq-timeout-ms" in: "x-envoy-upstream-rq-per-try-timeout-ms" in: "x-envoy-max-retries" in: "x-envoy-retry-grpc-on" in: "x-envoy-retry-on" } } }]; // If not set, ingress Envoy will ignore // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress // Envoy, when deriving timeout for upstream cluster. bool respect_expected_rq_timeout = 6; } ================================================ FILE: api/envoy/config/filter/http/squash/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/squash/v2/squash.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.squash.v2; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.squash.v2"; option java_outer_classname = "SquashProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.squash.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Squash] // Squash :ref:`configuration overview `. // [#extension: envoy.filters.http.squash] // [#next-free-field: 6] message Squash { // The name of the cluster that hosts the Squash server. string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // When the filter requests the Squash server to create a DebugAttachment, it will use this // structure as template for the body of the request. It can contain reference to environment // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server // with more information to find the process to attach the debugger to. For example, in a // Istio/k8s environment, this will contain information on the pod: // // .. 
code-block:: json // // { // "spec": { // "attachment": { // "pod": "{{ POD_NAME }}", // "namespace": "{{ POD_NAMESPACE }}" // }, // "match_request": true // } // } // // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API) google.protobuf.Struct attachment_template = 2; // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second. google.protobuf.Duration request_timeout = 3; // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60 // seconds. google.protobuf.Duration attachment_timeout = 4; // Amount of time to poll for the status of the attachment object in the Squash server // (to check if it has been attached). Defaults to 1 second. google.protobuf.Duration attachment_poll_period = 5; } ================================================ FILE: api/envoy/config/filter/http/tap/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/common/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/http/tap/v2alpha/tap.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.tap.v2alpha; import "envoy/config/common/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.tap.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.tap.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] // Tap :ref:`configuration overview `. // [#extension: envoy.filters.http.tap] // Top level configuration for the tap filter. message Tap { // Common configuration for the HTTP tap filter. common.tap.v2alpha.CommonExtensionConfig common_config = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/filter/http/transcoder/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/http/transcoder/v2/transcoder.proto ================================================ syntax = "proto3"; package envoy.config.filter.http.transcoder.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.http.transcoder.v2"; option java_outer_classname = "TranscoderProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.http.grpc_json_transcoder.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC-JSON transcoder] // gRPC-JSON transcoder :ref:`configuration overview `. // [#extension: envoy.filters.http.grpc_json_transcoder] // [#next-free-field: 10] message GrpcJsonTranscoder { message PrintOptions { // Whether to add spaces, line breaks and indentation to make the JSON // output easy to read. Defaults to false. bool add_whitespace = 1; // Whether to always print primitive fields. By default primitive // fields with default values will be omitted in JSON output. For // example, an int32 field set to 0 will be omitted. Setting this flag to // true will override the default behavior and print primitive fields // regardless of their values. Defaults to false. bool always_print_primitive_fields = 2; // Whether to always print enums as ints. By default they are rendered // as strings. Defaults to false. bool always_print_enums_as_ints = 3; // Whether to preserve proto field names. By default protobuf will // generate JSON field names using the ``json_name`` option, or lower camel case, // in that order. Setting this flag will preserve the original field names. Defaults to false. 
bool preserve_proto_field_names = 4; } oneof descriptor_set { option (validate.required) = true; // Supplies the filename of // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC // services. string proto_descriptor = 1; // Supplies the binary content of // :ref:`the proto descriptor set <config_grpc_json_generate_proto_descriptor_set>` for the gRPC // services. bytes proto_descriptor_bin = 4; } // A list of strings that // supplies the fully qualified service names (i.e. "package_name.service_name") that // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than // the service names specified here, but they won't be translated. repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; // Control options for response JSON. These options are passed directly to // `JsonPrintOptions <https://developers.google.com/protocol-buffers/docs/reference/cpp/google.protobuf.util.json_util#JsonPrintOptions>`_. PrintOptions print_options = 3; // Whether to keep the incoming request route after the outgoing headers have been transformed to // match the upstream gRPC service. Note: This means that routes for gRPC services that are // not transcoded cannot be used in combination with *match_incoming_request_route*. bool match_incoming_request_route = 5; // A list of query parameters to be ignored for transcoding method mapping. // By default, the transcoder filter will not transcode a request if there are any // unknown/invalid query parameters. // // Example : // // .. code-block:: proto // // service Bookstore { // rpc GetShelf(GetShelfRequest) returns (Shelf) { // option (google.api.http) = { // get: "/shelves/{shelf}" // }; // } // } // // message GetShelfRequest { // int64 shelf = 1; // } // // message Shelf {} // // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf`` because variable // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow // the same request to be mapped to ``GetShelf``.
repeated string ignored_query_parameters = 6; // Whether to route methods without the ``google.api.http`` option. // // Example : // // .. code-block:: proto // // package bookstore; // // service Bookstore { // rpc GetShelf(GetShelfRequest) returns (Shelf) {} // } // // message GetShelfRequest { // int64 shelf = 1; // } // // message Shelf {} // // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. bool auto_mapping = 7; // Whether to ignore query parameters that cannot be mapped to a corresponding // protobuf field. Use this if you cannot control the query parameters and do // not know them beforehand. Otherwise use ``ignored_query_parameters``. // Defaults to false. bool ignore_unknown_query_parameters = 8; // Whether to convert gRPC status headers to JSON. // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` // from the ``grpc-status-details-bin`` header and use it as JSON body. // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and // ``grpc-message`` headers. // The error details types must be present in the ``proto_descriptor``. // // For example, if an upstream server replies with headers: // // .. code-block:: none // // grpc-status: 5 // grpc-status-details-bin: // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ // // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message // ``google.rpc.Status``. It will be transcoded into: // // .. code-block:: none // // HTTP/1.1 404 Not Found // content-type: application/json // // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} // // In order to transcode the message, the ``google.rpc.RequestInfo`` type from // the ``google/rpc/error_details.proto`` should be included in the configured // :ref:`proto descriptor set `. 
bool convert_grpc_status = 9; } ================================================ FILE: api/envoy/config/filter/listener/http_inspector/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/listener/http_inspector/v2/http_inspector.proto ================================================ syntax = "proto3"; package envoy.config.filter.listener.http_inspector.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.http_inspector.v2"; option java_outer_classname = "HttpInspectorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.http_inspector.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP Inspector Filter] // Detect whether the application protocol is HTTP. // [#extension: envoy.filters.listener.http_inspector] message HttpInspector { } ================================================ FILE: api/envoy/config/filter/listener/original_dst/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/listener/original_dst/v2/original_dst.proto ================================================ syntax = "proto3"; package envoy.config.filter.listener.original_dst.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.original_dst.v2"; option java_outer_classname = "OriginalDstProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.original_dst.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Original Dst Filter] // Use the Original destination address on downstream connections. // [#extension: envoy.filters.listener.original_dst] message OriginalDst { } ================================================ FILE: api/envoy/config/filter/listener/original_src/v2alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/listener/original_src/v2alpha1/original_src.proto ================================================ syntax = "proto3"; package envoy.config.filter.listener.original_src.v2alpha1; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.original_src.v2alpha1"; option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.original_src.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. // [#extension: envoy.filters.listener.original_src] // The Original Src filter binds upstream connections to the original source address determined // for the connection. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. message OriginalSrc { // Whether to bind the port to the one used in the original downstream connection. // [#not-implemented-hide:] bool bind_port = 1; // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to // ensure that non-local addresses may be routed back through envoy when binding to the original // source address. The option will not be applied if the mark is 0. uint32 mark = 2; } ================================================ FILE: api/envoy/config/filter/listener/proxy_protocol/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/listener/proxy_protocol/v2/proxy_protocol.proto ================================================ syntax = "proto3"; package envoy.config.filter.listener.proxy_protocol.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.proxy_protocol.v2"; option java_outer_classname = "ProxyProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.proxy_protocol.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Proxy Protocol Filter] // PROXY protocol listener filter. // [#extension: envoy.filters.listener.proxy_protocol] message ProxyProtocol { } ================================================ FILE: api/envoy/config/filter/listener/tls_inspector/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/listener/tls_inspector/v2/tls_inspector.proto ================================================ syntax = "proto3"; package envoy.config.filter.listener.tls_inspector.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.listener.tls_inspector.v2"; option java_outer_classname = "TlsInspectorProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.listener.tls_inspector.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: TLS Inspector Filter] // Allows detecting whether the transport appears to be TLS or plaintext. // [#extension: envoy.filters.listener.tls_inspector] message TlsInspector { } ================================================ FILE: api/envoy/config/filter/network/client_ssl_auth/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/client_ssl_auth/v2/client_ssl_auth.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.client_ssl_auth.v2; import "envoy/api/v2/core/address.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.client_ssl_auth.v2"; option java_outer_classname = "ClientSslAuthProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.client_ssl_auth.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Client TLS authentication] // Client TLS authentication // :ref:`configuration overview `. // [#extension: envoy.filters.network.client_ssl_auth] message ClientSSLAuth { // The :ref:`cluster manager ` cluster that runs // the authentication service. The filter will connect to the service every 60s to fetch the list // of principals. The service must support the expected :ref:`REST API // `. string auth_api_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // The prefix to use when emitting :ref:`statistics // `. string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}]; // Time in milliseconds between principal refreshes from the // authentication service. Default is 60000 (60s). The actual fetch time // will be this value plus a random jittered value between // 0-refresh_delay_ms milliseconds. 
google.protobuf.Duration refresh_delay = 3; // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no // IP allowlist. repeated api.v2.core.CidrRange ip_white_list = 4; } ================================================ FILE: api/envoy/config/filter/network/direct_response/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/direct_response/v2/config.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.direct_response.v2; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.direct_response.v2"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.direct_response.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Direct response] // Direct response :ref:`configuration overview `. // [#extension: envoy.filters.network.direct_response] message Config { // Response data as a data source. api.v2.core.DataSource response = 1; } ================================================ FILE: api/envoy/config/filter/network/dubbo_proxy/v2alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/route:pkg", "//envoy/type:pkg", "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/dubbo_proxy/v2alpha1/README.md ================================================ Protocol buffer definitions for the Dubbo proxy. ================================================ FILE: api/envoy/config/filter/network/dubbo_proxy/v2alpha1/dubbo_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.dubbo_proxy.v2alpha1; import "envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; option java_outer_classname = "DubboProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.dubbo_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.dubbo_proxy] // Dubbo Protocol types supported by Envoy. enum ProtocolType { // the default protocol. Dubbo = 0; } // Dubbo Serialization types supported by Envoy. enum SerializationType { // the default serialization protocol. Hessian2 = 0; } // [#next-free-field: 6] message DubboProxy { // The human readable prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // Configure the protocol used. ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; // Configure the serialization protocol used. 
SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; // The route table for the connection manager is static and is specified in this property. repeated RouteConfiguration route_config = 4; // A list of individual Dubbo filters that make up the filter chain for requests made to the // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards // compatibility, if no dubbo_filters are specified, a default Dubbo router filter // (`envoy.filters.dubbo.router`) is used. repeated DubboFilter dubbo_filters = 5; } // DubboFilter configures a Dubbo filter. message DubboFilter { // The name of the filter to instantiate. The name must match a supported // filter. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. google.protobuf.Any config = 2; } ================================================ FILE: api/envoy/config/filter/network/dubbo_proxy/v2alpha1/route.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.dubbo_proxy.v2alpha1; import "envoy/api/v2/route/route_components.proto"; import "envoy/type/matcher/string.proto"; import "envoy/type/range.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.dubbo_proxy.v2alpha1"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.dubbo_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dubbo Proxy Route Configuration] // Dubbo Proxy :ref:`configuration overview `. // [#next-free-field: 6] message RouteConfiguration { // The name of the route configuration. 
Reserved for future use in asynchronous route discovery. string name = 1; // The interface name of the service. string interface = 2; // Which group does the interface belong to. string group = 3; // The version number of the interface. string version = 4; // The list of routes that will be matched, in order, against incoming requests. The first route // that matches will be used. repeated Route routes = 5; } message Route { // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. RouteAction route = 2 [(validate.rules).message = {required: true}]; } message RouteMatch { // Method level routing matching. MethodMatch method = 1; // Specifies a set of headers that the route should match on. The router will check the request’s // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). repeated api.v2.route.HeaderMatcher headers = 2; } message RouteAction { oneof cluster_specifier { option (validate.required) = true; // Indicates the upstream cluster to which the request should be routed. string cluster = 1; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. // Currently ClusterWeight only supports the name and weight fields. api.v2.route.WeightedCluster weighted_clusters = 2; } } message MethodMatch { // The parameter matching type. message ParameterMatchSpecifier { oneof parameter_match_specifier { // If specified, header match will be performed based on the value of the header. string exact_match = 3; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. 
// The entire request header value must represent an integer in base 10 notation: consisting // of an optional plus or minus sign followed by a sequence of digits. The rule will not match // if the header value does not represent an integer. Match will fail for empty values, // floating point numbers or if only a subsequence of the header value is an integer. // // Examples: // // * For range [-10,0), route will match for header value -1, but not for 0, // "somestring", 10.9, "-1somestring" type.Int64Range range_match = 4; } } // The name of the method. type.matcher.StringMatcher name = 1; // Method parameter definition. // The key is the parameter index, starting from 0. // The value is the parameter matching type. map<uint32, MethodMatch.ParameterMatchSpecifier> params_match = 2; } ================================================ FILE: api/envoy/config/filter/network/echo/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/network/echo/v2/echo.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.echo.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.echo.v2"; option java_outer_classname = "EchoProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.echo.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Echo] // Echo :ref:`configuration overview <config_network_filters_echo>`.
// [#extension: envoy.filters.network.echo]

// The echo network filter has no configuration; its presence alone enables it.
message Echo {
}


================================================
FILE: api/envoy/config/filter/network/ext_authz/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2/core:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)


================================================
FILE: api/envoy/config/filter/network/ext_authz/v2/ext_authz.proto
================================================
syntax = "proto3";

package envoy.config.filter.network.ext_authz.v2;

import "envoy/api/v2/core/grpc_service.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.filter.network.ext_authz.v2";
option java_outer_classname = "ExtAuthzProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package =
    "envoy.extensions.filters.network.ext_authz.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Network External Authorization ]
// The network layer external authorization service configuration
// :ref:`configuration overview <config_network_filters_ext_authz>`.
// [#extension: envoy.filters.network.ext_authz]

// External Authorization filter calls out to an external service over the
// gRPC Authorization API defined by
// :ref:`CheckRequest <envoy_api_msg_service.auth.v2.CheckRequest>`.
// A failed check will cause this filter to close the TCP connection.
message ExtAuthz {
  // The prefix to use when emitting statistics.
  string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}];

  // The external authorization gRPC service configuration.
  // The default timeout is set to 200ms by this filter.
  api.v2.core.GrpcService grpc_service = 2;

  // The filter's behaviour in case the external authorization service does
  // not respond back. When it is set to true, Envoy will also allow traffic in case of
  // communication failure between authorization service and the proxy.
  // Defaults to false.
  bool failure_mode_allow = 3;

  // Specifies if the peer certificate is sent to the external service.
  //
  // When this field is true, Envoy will include the peer X.509 certificate, if available, in the
  // :ref:`certificate<envoy_api_field_service.auth.v2.AttributeContext.Peer.certificate>`.
  bool include_peer_certificate = 4;
}


================================================
FILE: api/envoy/config/filter/network/http_connection_manager/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/api/v2:pkg",
        "//envoy/api/v2/core:pkg",
        "//envoy/config/filter/accesslog/v2:pkg",
        "//envoy/config/trace/v2:pkg",
        "//envoy/type:pkg",
        "//envoy/type/tracing/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)


================================================
FILE: api/envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.proto
================================================
syntax = "proto3";

package envoy.config.filter.network.http_connection_manager.v2;

import "envoy/api/v2/core/config_source.proto";
import "envoy/api/v2/core/protocol.proto";
import "envoy/api/v2/route.proto";
import "envoy/api/v2/scoped_route.proto";
import "envoy/config/filter/accesslog/v2/accesslog.proto";
import "envoy/config/trace/v2/http_tracer.proto";
import "envoy/type/percent.proto";
import "envoy/type/tracing/v2/custom_tag.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import
"envoy/annotations/deprecation.proto";
import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.filter.network.http_connection_manager.v2";
option java_outer_classname = "HttpConnectionManagerProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package =
    "envoy.extensions.filters.network.http_connection_manager.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: HTTP connection manager]
// HTTP connection manager :ref:`configuration overview <config_http_conn_man>`.
// [#extension: envoy.filters.network.http_connection_manager]

// [#next-free-field: 37]
message HttpConnectionManager {
  enum CodecType {
    // For every new connection, the connection manager will determine which
    // codec to use. This mode supports both ALPN for TLS listeners as well as
    // protocol inference for plaintext listeners. If ALPN data is available, it
    // is preferred, otherwise protocol inference is used. In almost all cases,
    // this is the right option to choose for this setting.
    AUTO = 0;

    // The connection manager will assume that the client is speaking HTTP/1.1.
    HTTP1 = 1;

    // The connection manager will assume that the client is speaking HTTP/2
    // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN.
    // Prior knowledge is allowed).
    HTTP2 = 2;

    // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with
    // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient
    // to distinguish HTTP1 and HTTP2 traffic.
    HTTP3 = 3;
  }

  enum ServerHeaderTransformation {
    // Overwrite any Server header with the contents of server_name.
    OVERWRITE = 0;

    // If no Server header is present, append Server server_name
    // If a Server header is present, pass it through.
    APPEND_IF_ABSENT = 1;

    // Pass through the value of the server header, and do not append a header
    // if none is present.
    PASS_THROUGH = 2;
  }

  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP
  // header.
  enum ForwardClientCertDetails {
    // Do not send the XFCC header to the next hop. This is the default value.
    SANITIZE = 0;

    // When the client connection is mTLS (Mutual TLS), forward the XFCC header
    // in the request.
    FORWARD_ONLY = 1;

    // When the client connection is mTLS, append the client certificate
    // information to the request's XFCC header and forward it.
    APPEND_FORWARD = 2;

    // When the client connection is mTLS, reset the XFCC header with the client
    // certificate information and send it to the next hop.
    SANITIZE_SET = 3;

    // Always forward the XFCC header in the request, regardless of whether the
    // client connection is mTLS.
    ALWAYS_FORWARD_ONLY = 4;
  }

  // [#next-free-field: 10]
  message Tracing {
    enum OperationName {
      // The HTTP listener is used for ingress/incoming requests.
      INGRESS = 0;

      // The HTTP listener is used for egress/outgoing requests.
      EGRESS = 1;
    }

    // The span name will be derived from this field. If
    // :ref:`traffic_direction <envoy_api_field_Listener.traffic_direction>` is
    // specified on the parent listener, then it is used instead of this field.
    //
    // .. attention::
    //  This field has been deprecated in favor of `traffic_direction`.
    OperationName operation_name = 1 [
      deprecated = true,
      (validate.rules).enum = {defined_only: true},
      (envoy.annotations.disallowed_by_default) = true
    ];

    // A list of header names used to create tags for the active span. The header name is used to
    // populate the tag name, and the header value is used to populate the tag value. The tag is
    // created if the specified header name is present in the request's headers.
    //
    // .. attention::
    //  This field has been deprecated in favor of :ref:`custom_tags
    //  <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.custom_tags>`.
    repeated string request_headers_for_tags = 2 [deprecated = true];

    // Target percentage of requests managed by this HTTP connection manager that will be force
    // traced if the :ref:`x-client-trace-id <config_http_conn_man_headers_x-client-trace-id>`
    // header is set. This field is a direct analog for the runtime variable
    // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
    // <config_http_conn_man_runtime>`.
    // Default: 100%
    type.Percent client_sampling = 3;

    // Target percentage of requests managed by this HTTP connection manager that will be randomly
    // selected for trace generation, if not requested by the client or not forced. This field is
    // a direct analog for the runtime variable 'tracing.random_sampling' in the
    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.
    // Default: 100%
    type.Percent random_sampling = 4;

    // Target percentage of requests managed by this HTTP connection manager that will be traced
    // after all other sampling checks have been applied (client-directed, force tracing, random
    // sampling). This field functions as an upper limit on the total configured sampling rate. For
    // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%
    // of client requests with the appropriate headers to be force traced. This field is a direct
    // analog for the runtime variable 'tracing.global_enabled' in the
    // :ref:`HTTP Connection Manager <config_http_conn_man_runtime>`.
    // Default: 100%
    type.Percent overall_sampling = 5;

    // Whether to annotate spans with additional data. If true, spans will include logs for stream
    // events.
    bool verbose = 6;

    // Maximum length of the request path to extract and include in the HttpUrl tag. Used to
    // truncate lengthy request paths to meet the needs of a tracing backend.
    // Default: 256
    google.protobuf.UInt32Value max_path_tag_length = 7;

    // A list of custom tags with unique tag name to create tags for the active span.
    repeated type.tracing.v2.CustomTag custom_tags = 8;

    // Configuration for an external tracing provider.
    // If not specified, no tracing will be performed.
    //
    // .. attention::
    //   Please be aware that *envoy.tracers.opencensus* provider can only be configured once
    //   in Envoy lifetime.
    //   Any attempts to reconfigure it or to use different configurations for different HCM filters
    //   will be rejected.
    //   Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes
    //   on OpenCensus side.
    trace.v2.Tracing.Http provider = 9;
  }

  message InternalAddressConfig {
    // Whether unix socket addresses should be considered internal.
    bool unix_sockets = 1;
  }

  // [#next-free-field: 7]
  message SetCurrentClientCertDetails {
    reserved 2;

    // Whether to forward the subject of the client cert. Defaults to false.
    google.protobuf.BoolValue subject = 1;

    // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the
    // XFCC header comma separated from other values with the value Cert="PEM".
    // Defaults to false.
    bool cert = 3;

    // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM
    // format. This will appear in the XFCC header comma separated from other values with the value
    // Chain="PEM".
    // Defaults to false.
    bool chain = 6;

    // Whether to forward the DNS type Subject Alternative Names of the client cert.
    // Defaults to false.
    bool dns = 4;

    // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to
    // false.
    bool uri = 5;
  }

  // The configuration for HTTP upgrades.
  // For each upgrade type desired, an UpgradeConfig must be added.
  //
  // .. warning::
  //
  //    The current implementation of upgrade headers does not handle
  //    multi-valued upgrade headers. Support for multi-valued headers may be
  //    added in the future if needed.
  //
  // .. warning::
  //    The current implementation of upgrade headers does not work with HTTP/2
  //    upstreams.
  message UpgradeConfig {
    // The case-insensitive name of this upgrade, e.g. "websocket".
    // For each upgrade type present in upgrade_configs, requests with
    // Upgrade: [upgrade_type]
    // will be proxied upstream.
    string upgrade_type = 1;

    // If present, this represents the filter chain which will be created for
    // this type of upgrade. If no filters are present, the filter chain for
    // HTTP connections will be used for this upgrade type.
    repeated HttpFilter filters = 2;

    // Determines if upgrades are enabled or disabled by default. Defaults to true.
    // This can be overridden on a per-route basis with :ref:`cluster
    // <envoy_api_field_route.RouteAction.upgrade_configs>` as documented in the
    // :ref:`upgrade documentation <arch_overview_upgrades>`.
    google.protobuf.BoolValue enabled = 3;
  }

  reserved 27;

  // Supplies the type of codec that the connection manager should use.
  CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}];

  // The human readable prefix to use when emitting statistics for the
  // connection manager. See the :ref:`statistics documentation <config_http_conn_man_stats>` for
  // more information.
  string stat_prefix = 2 [(validate.rules).string = {min_bytes: 1}];

  oneof route_specifier {
    option (validate.required) = true;

    // The connection manager's route table will be dynamically loaded via the RDS API.
    Rds rds = 3;

    // The route table for the connection manager is static and is specified in this property.
    api.v2.RouteConfiguration route_config = 4;

    // A route table will be dynamically assigned to each request based on request attributes
    // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are
    // specified in this message.
    ScopedRoutes scoped_routes = 31;
  }

  // A list of individual HTTP filters that make up the filter chain for
  // requests made to the connection manager. :ref:`Order matters <arch_overview_http_filters_ordering>`
  // as the filters are processed sequentially as request events happen.
  repeated HttpFilter http_filters = 5;

  // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent`
  // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked
  // documentation for more information. Defaults to false.
  google.protobuf.BoolValue add_user_agent = 6;

  // Presence of the object defines whether the connection manager
  // emits :ref:`tracing <arch_overview_tracing>` data to the :ref:`configured tracing provider
  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing.provider>`.
  // NOTE(review): the original :ref: targets here were lost in extraction; targets above are
  // reconstructed from upstream Envoy docs — verify against the canonical proto.
  Tracing tracing = 7;

  // Additional settings for HTTP requests handled by the connection manager. These will be
  // applicable to both HTTP1 and HTTP2 requests.
  api.v2.core.HttpProtocolOptions common_http_protocol_options = 35;

  // Additional HTTP/1 settings that are passed to the HTTP/1 codec.
  api.v2.core.Http1ProtocolOptions http_protocol_options = 8;

  // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec.
  api.v2.core.Http2ProtocolOptions http2_protocol_options = 9;

  // An optional override that the connection manager will write to the server
  // header in responses. If not set, the default is *envoy*.
  string server_name = 10;

  // Defines the action to be applied to the Server header on the response path.
  // By default, Envoy will overwrite the header with the value specified in
  // server_name.
  ServerHeaderTransformation server_header_transformation = 34
      [(validate.rules).enum = {defined_only: true}];

  // The maximum request headers size for incoming connections.
  // If unconfigured, the default max request headers allowed is 60 KiB.
  // Requests that exceed this limit will receive a 431 response.
  // The max configurable limit is 96 KiB, based on current implementation
  // constraints.
  google.protobuf.UInt32Value max_request_headers_kb = 29
      [(validate.rules).uint32 = {lte: 96 gt: 0}];

  // The idle timeout for connections managed by the connection manager. The
  // idle timeout is defined as the period in which there are no active
  // requests. If not set, there is no idle timeout. When the idle timeout is
  // reached the connection will be closed. If the connection is an HTTP/2
  // connection a drain sequence will occur prior to closing the connection.
  // This field is deprecated. Use :ref:`idle_timeout
  // <envoy_api_field_core.HttpProtocolOptions.idle_timeout>`
  // instead.
  google.protobuf.Duration idle_timeout = 11
      [deprecated = true, (envoy.annotations.disallowed_by_default) = true];

  // The stream idle timeout for connections managed by the connection manager.
  // If not specified, this defaults to 5 minutes. The default value was selected
  // so as not to interfere with any smaller configured timeouts that may have
  // existed in configurations prior to the introduction of this feature, while
  // introducing robustness to TCP connections that terminate without a FIN.
  //
  // This idle timeout applies to new streams and is overridable by the
  // :ref:`route-level idle_timeout
  // <envoy_api_field_route.RouteAction.idle_timeout>`. Even on a stream in
  // which the override applies, prior to receipt of the initial request
  // headers, the :ref:`stream_idle_timeout
  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.stream_idle_timeout>`
  // applies. Each time an encode/decode event for headers or data is processed
  // for the stream, the timer will be reset. If the timeout fires, the stream
  // is terminated with a 408 Request Timeout error code if no upstream response
  // header has been received, otherwise a stream reset occurs.
  //
  // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough
  // window to write any remaining stream data once the entirety of stream data (local end stream is
  // true) has been buffered pending available window. In other words, this timeout defends against
  // a peer that does not release enough window to completely write the stream, even though all
  // data has been proxied within available flow control windows. If the timeout is hit in this
  // case, the :ref:`tx_flush_timeout <config_http_conn_man_stats>` counter will be
  // incremented. Note that :ref:`max_stream_duration
  // <envoy_api_field_core.HttpProtocolOptions.max_stream_duration>` does not apply to this corner
  // case.
  //
  // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due
  // to the granularity of events presented to the connection manager. For example, while receiving
  // very large request headers, it may be the case that there is traffic regularly arriving on the
  // wire while the connection manager is only able to observe the end-of-headers event, hence the
  // stream may still idle timeout.
  //
  // A value of 0 will completely disable the connection manager stream idle
  // timeout, although per-route idle timeout overrides will continue to apply.
  google.protobuf.Duration stream_idle_timeout = 24;

  // The amount of time that Envoy will wait for the entire request to be received.
  // The timer is activated when the request is initiated, and is disarmed when the last byte of the
  // request is sent upstream (i.e. all decoding filters have processed the request), OR when the
  // response is initiated. If not specified or set to 0, this timeout is disabled.
  google.protobuf.Duration request_timeout = 28;

  // The time that Envoy will wait between sending an HTTP/2 "shutdown
  // notification" (GOAWAY frame with max stream ID) and a final GOAWAY frame.
  // This is used so that Envoy provides a grace period for new streams that
  // race with the final GOAWAY frame. During this grace period, Envoy will
  // continue to accept new streams. After the grace period, a final GOAWAY
  // frame is sent and Envoy will start refusing new streams. Draining occurs
  // both when a connection hits the idle timeout or during general server
  // draining. The default grace period is 5000 milliseconds (5 seconds) if this
  // option is not specified.
  google.protobuf.Duration drain_timeout = 12;

  // The delayed close timeout is for downstream connections managed by the HTTP connection manager.
  // It is defined as a grace period after connection close processing has been locally initiated
  // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy
  // from the downstream connection) prior to Envoy closing the socket associated with that
  // connection.
  // NOTE: This timeout is enforced even when the socket associated with the downstream connection
  // is pending a flush of the write buffer. However, any progress made writing data to the socket
  // will restart the timer associated with this timeout. This means that the total grace period for
  // a socket in this state will be
  // <time spent flushing data to the socket> + <delayed_close_timeout>.
  // (NOTE(review): the bracketed placeholders were lost in extraction and have been reconstructed
  // from context — verify against the canonical proto.)
  //
  // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close
  // sequence mitigates a race condition that exists when downstream clients do not drain/process
  // data in a connection's receive buffer after a remote close has been detected via a socket
  // write(). This race leads to such clients failing to process the response code sent by Envoy,
  // which could result in erroneous downstream processing.
  //
  // If the timeout triggers, Envoy will close the connection's socket.
  //
  // The default timeout is 1000 ms if this option is not specified.
  //
  // .. NOTE::
  //    To be useful in avoiding the race condition described above, this timeout must be set
  //    to *at least* <max round trip time of the connection>+<100ms to account for
  //    a reasonable "worst" case processing time for a full iteration of Envoy's event loop>.
  //
  // .. WARNING::
  //    A value of 0 will completely disable delayed close processing. When disabled, the downstream
  //    connection's socket will be closed immediately after the write flush is completed or will
  //    never close if the write flush does not complete.
  google.protobuf.Duration delayed_close_timeout = 26;

  // Configuration for :ref:`HTTP access logs <arch_overview_access_logs>`
  // emitted by the connection manager.
  repeated accesslog.v2.AccessLog access_log = 13;

  // If set to true, the connection manager will use the real remote address
  // of the client connection when determining internal versus external origin and manipulating
  // various headers. If set to false or absent, the connection manager will use the
  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for
  // :ref:`config_http_conn_man_headers_x-forwarded-for`,
  // :ref:`config_http_conn_man_headers_x-envoy-internal`, and
  // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information.
  google.protobuf.BoolValue use_remote_address = 14;

  // The number of additional ingress proxy hops from the right side of the
  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when
  // determining the origin client's IP address. The default is zero if this option
  // is not specified. See the documentation for
  // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information.
  uint32 xff_num_trusted_hops = 19;

  // Configures what network addresses are considered internal for stats and header sanitation
  // purposes. If unspecified, only RFC1918 IP addresses will be considered internal.
  // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more
  // information about internal/external addresses.
  InternalAddressConfig internal_address_config = 25;

  // If set, Envoy will not append the remote address to the
  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in
  // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager
  // has mutated the request headers. While :ref:`use_remote_address
  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.use_remote_address>`
  // will also suppress XFF addition, it has consequences for logging and other
  // Envoy uses of the remote address, so *skip_xff_append* should be used
  // when only an elision of XFF addition is intended.
  bool skip_xff_append = 21;

  // Via header value to append to request and response headers. If this is
  // empty, no via header will be appended.
  string via = 22;

  // Whether the connection manager will generate the :ref:`x-request-id
  // <config_http_conn_man_headers_x-request-id>` header if it does not exist. This defaults to
  // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature
  // is not desired it can be disabled.
  google.protobuf.BoolValue generate_request_id = 15;

  // Whether the connection manager will keep the :ref:`x-request-id
  // <config_http_conn_man_headers_x-request-id>` header if passed for a request that is edge
  // (Edge request is the request from external clients to front Envoy) and not reset it, which
  // is the current Envoy behaviour. This defaults to false.
  bool preserve_external_request_id = 32;

  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP
  // header.
  ForwardClientCertDetails forward_client_cert_details = 16
      [(validate.rules).enum = {defined_only: true}];

  // This field is valid only when :ref:`forward_client_cert_details
  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.forward_client_cert_details>`
  // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in
  // the client certificate to be forwarded. Note that in the
  // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and
  // *By* is always set when the client certificate presents the URI type Subject Alternative Name
  // value.
  SetCurrentClientCertDetails set_current_client_cert_details = 17;

  // If proxy_100_continue is true, Envoy will proxy incoming "Expect:
  // 100-continue" headers upstream, and forward "100 Continue" responses
  // downstream. If this is false or not set, Envoy will instead strip the
  // "Expect: 100-continue" header, and send a "100 Continue" response itself.
  bool proxy_100_continue = 18;

  // If
  // :ref:`use_remote_address
  // <envoy_api_field_config.filter.network.http_connection_manager.v2.HttpConnectionManager.use_remote_address>`
  // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is
  // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*.
  // This is useful for testing compatibility of upstream services that parse the header value. For
  // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses
  // <https://tools.ietf.org/html/rfc4291#section-2.5.5.2>`_ for details. This will also affect the
  // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See
  // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6
  // <config_http_conn_man_runtime_represent_ipv4_remote_address_as_ipv4_mapped_ipv6>` for runtime
  // control.
  // [#not-implemented-hide:]
  bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;

  repeated UpgradeConfig upgrade_configs = 23;

  // Should paths be normalized according to RFC 3986 before any processing of
  // requests by HTTP filters or routing? This affects the upstream *:path* header
  // as well. For paths that fail this check, Envoy will respond with 400 to
  // paths that are malformed. This defaults to false currently but will default
  // true in the future. When not specified, this value may be overridden by the
  // runtime variable
  // :ref:`http_connection_manager.normalize_path<config_http_conn_man_runtime_normalize_path>`.
  // See `Normalization and Comparison <https://tools.ietf.org/html/rfc3986#section-6>`_
  // for details of normalization.
  // Note that Envoy does not perform
  // `case normalization <https://tools.ietf.org/html/rfc3986#section-6.2.2.1>`_
  google.protobuf.BoolValue normalize_path = 30;

  // Determines if adjacent slashes in the path are merged into one before any processing of
  // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without
  // setting this option, incoming requests with path `//dir///file` will not match against route
  // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of
  // `HTTP spec <https://tools.ietf.org/html/rfc3986>`_ and is provided for convenience.
  bool merge_slashes = 33;

  // The configuration of the request ID extension. This includes operations such as
  // generation, validation, and associated tracing operations.
  //
  // If not set, Envoy uses the default UUID-based behavior:
  //
  // 1. Request ID is propagated using *x-request-id* header.
  //
  // 2. Request ID is a universally unique identifier (UUID).
  //
  // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID.
  RequestIDExtension request_id_extension = 36;
}

message Rds {
  // Configuration source specifier for RDS.
api.v2.core.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];

  // The name of the route configuration. This name will be passed to the RDS
  // API. This allows an Envoy configuration with multiple HTTP listeners (and
  // associated HTTP connection manager filters) to use different route
  // configurations.
  string route_config_name = 2 [(validate.rules).string = {min_bytes: 1}];
}

// This message is used to work around the limitations with 'oneof' and repeated fields.
message ScopedRouteConfigurationsList {
  repeated api.v2.ScopedRouteConfiguration scoped_route_configurations = 1
      [(validate.rules).repeated = {min_items: 1}];
}

// [#next-free-field: 6]
message ScopedRoutes {
  // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These
  // keys are matched against a set of :ref:`Key<envoy_api_msg_ScopedRouteConfiguration.Key>`
  // objects assembled from :ref:`ScopedRouteConfiguration<envoy_api_msg_ScopedRouteConfiguration>`
  // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via
  // :ref:`scoped_route_configurations_list<envoy_api_field_config.filter.network.http_connection_manager.v2.ScopedRoutes.scoped_route_configurations_list>`.
  //
  // Upon receiving a request's headers, the Router will build a key using the algorithm specified
  // by this message. This key will be used to look up the routing table (i.e., the
  // :ref:`RouteConfiguration<envoy_api_msg_RouteConfiguration>`) to use for the request.
  message ScopeKeyBuilder {
    // Specifies the mechanism for constructing key fragments which are composed into scope keys.
    message FragmentBuilder {
      // Specifies how the value of a header should be extracted.
      // The following example maps the structure of a header to the fields in this message.
      //
      // .. code::
      //
      //              <0> <1>   <-- index
      //    X-Header: a=b;c=d
      //    |         || |
      //    |         || \----> <element>.value
      //    |         ||
      //    |         |\----> <element>.separator
      //    |         |
      //    |         \----> <header_value_extractor>.element_separator
      //    |
      //    \----> <header_value_extractor>.name
      //
      // (NOTE(review): the arrow labels above were lost in extraction and have been reconstructed
      // from the field definitions below — verify against the canonical proto.)
      //
      // Each 'a=b' key-value pair constitutes an 'element' of the header field.
      message HeaderValueExtractor {
        // Specifies a header field's key value pair to match on.
        message KvElement {
          // The separator between key and value (e.g., '=' separates 'k=v;...').
          // If an element is an empty string, the element is ignored.
          // If an element contains no separator, the whole element is parsed as key and the
          // fragment value is an empty string.
          // If there are multiple values for a matched key, the first value is returned.
          string separator = 1 [(validate.rules).string = {min_bytes: 1}];

          // The key to match on.
          string key = 2 [(validate.rules).string = {min_bytes: 1}];
        }

        // The name of the header field to extract the value from.
        string name = 1 [(validate.rules).string = {min_bytes: 1}];

        // The element separator (e.g., ';' separates 'a;b;c;d').
        // Default: empty string. This causes the entirety of the header field to be extracted.
        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'
        // must be set to 0.
        string element_separator = 2;

        oneof extract_type {
          // Specifies the zero based index of the element to extract.
          // Note Envoy concatenates multiple values of the same header key into a comma separated
          // string, the splitting always happens after the concatenation.
          uint32 index = 3;

          // Specifies the key value pair to extract the value from.
          KvElement element = 4;
        }
      }

      oneof type {
        option (validate.required) = true;

        // Specifies how a header field's value should be extracted.
        HeaderValueExtractor header_value_extractor = 1;
      }
    }

    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the
    // fragments of a :ref:`ScopedRouteConfiguration<envoy_api_msg_ScopedRouteConfiguration>`.
    // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key.
    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];
  }

  // The name assigned to the scoped routing configuration.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // The algorithm to use for constructing a scope key for each request.
  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];

  // Configuration source specifier for RDS.
  // This config source is used to subscribe to RouteConfiguration resources specified in
  // ScopedRouteConfiguration messages.
  api.v2.core.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}];

  oneof config_specifier {
    option (validate.required) = true;

    // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by
    // matching a key constructed from the request's attributes according to the algorithm specified
    // by the
    // :ref:`ScopeKeyBuilder<envoy_api_msg_config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder>`
    // in this message.
    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;

    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS
    // API. A scope is assigned to a request by matching a key constructed from the request's
    // attributes according to the algorithm specified by the
    // :ref:`ScopeKeyBuilder<envoy_api_msg_config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder>`
    // in this message.
    ScopedRds scoped_rds = 5;
  }
}

message ScopedRds {
  // Configuration source specifier for scoped RDS.
  api.v2.core.ConfigSource scoped_rds_config_source = 1
      [(validate.rules).message = {required: true}];
}

message HttpFilter {
  reserved 3;

  // The name of the filter to instantiate. The name must match a
  // :ref:`supported filter <config_http_filters>`.
  string name = 1 [(validate.rules).string = {min_bytes: 1}];

  // Filter specific configuration which depends on the filter being instantiated. See the supported
  // filters for further documentation.
  oneof config_type {
    google.protobuf.Struct config = 2 [deprecated = true];

    google.protobuf.Any typed_config = 4;
  }
}

message RequestIDExtension {
  // Request ID extension specific configuration.
  google.protobuf.Any typed_config = 1;
}


================================================
FILE: api/envoy/config/filter/network/kafka_broker/v2alpha1/BUILD
================================================
# DO NOT EDIT.
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/network/kafka_broker/v2alpha1/kafka_broker.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.kafka_broker.v2alpha1; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.kafka_broker.v2alpha1"; option java_outer_classname = "KafkaBrokerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.kafka_broker.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Kafka Broker] // Kafka Broker :ref:`configuration overview `. // [#extension: envoy.filters.network.kafka_broker] message KafkaBroker { // The prefix to use when emitting :ref:`statistics `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; } ================================================ FILE: api/envoy/config/filter/network/local_rate_limit/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/local_rate_limit/v2alpha/local_rate_limit.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.local_rate_limit.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/type/token_bucket.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.local_rate_limit.v2alpha"; option java_outer_classname = "LocalRateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.local_ratelimit.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Local rate limit] // Local rate limit :ref:`configuration overview `. // [#extension: envoy.filters.network.local_ratelimit] message LocalRateLimit { // The prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The token bucket configuration to use for rate limiting connections that are processed by the // filter's filter chain. Each incoming connection processed by the filter consumes a single // token. If the token is available, the connection will be allowed. If no tokens are available, // the connection will be immediately closed. // // .. note:: // In the current implementation each filter and filter chain has an independent rate limit. // // .. note:: // In the current implementation the token bucket's :ref:`fill_interval // ` must be >= 50ms to avoid too aggressive // refills. 
type.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}]; // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults // to enabled. api.v2.core.RuntimeFeatureFlag runtime_enabled = 3; } ================================================ FILE: api/envoy/config/filter/network/mongo_proxy/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/fault/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.mongo_proxy.v2; import "envoy/config/filter/fault/v2/fault.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.mongo_proxy.v2"; option java_outer_classname = "MongoProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.mongo_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Mongo proxy] // MongoDB :ref:`configuration overview `. // [#extension: envoy.filters.network.mongo_proxy] message MongoProxy { // The human readable prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The optional path to use for writing Mongo access logs. If no access log // path is specified, no access logs will be written. Note that access log is // also gated :ref:`runtime `. 
string access_log = 2; // Inject a fixed delay before proxying a Mongo operation. Delays are // applied to the following MongoDB operations: Query, Insert, GetMore, // and KillCursors. Once an active delay is in progress, all incoming // data up until the timer event fires will be a part of the delay. fault.v2.FaultDelay delay = 3; // Flag to specify whether :ref:`dynamic metadata // ` should be emitted. Defaults to false. bool emit_dynamic_metadata = 4; } ================================================ FILE: api/envoy/config/filter/network/mysql_proxy/v1alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/network/mysql_proxy/v1alpha1/mysql_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.mysql_proxy.v1alpha1; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.mysql_proxy.v1alpha1"; option java_outer_classname = "MysqlProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.mysql_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: MySQL proxy] // MySQL Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.mysql_proxy] message MySQLProxy { // The human readable prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] The optional path to use for writing MySQL access logs. 
// If the access log field is empty, access logs will not be written. string access_log = 2; } ================================================ FILE: api/envoy/config/filter/network/rate_limit/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/ratelimit:pkg", "//envoy/config/ratelimit/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/rate_limit/v2/rate_limit.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.rate_limit.v2; import "envoy/api/v2/ratelimit/ratelimit.proto"; import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.rate_limit.v2"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.ratelimit.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.network.ratelimit] // [#next-free-field: 7] message RateLimit { // The prefix to use when emitting :ref:`statistics `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The rate limit domain to use in the rate limit service request. string domain = 2 [(validate.rules).string = {min_bytes: 1}]; // The rate limit descriptor list to use in the rate limit service request. 
repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 3 [(validate.rules).repeated = {min_items: 1}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. google.protobuf.Duration timeout = 4; // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. // Defaults to false. bool failure_mode_deny = 5; // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. ratelimit.v2.RateLimitServiceConfig rate_limit_service = 6 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/filter/network/rbac/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/rbac/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/rbac/v2/rbac.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.rbac.v2; import "envoy/config/rbac/v2/rbac.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.rbac.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. // [#extension: envoy.filters.network.rbac] // RBAC network filter config. // // Header should not be used in rules/shadow_rules in RBAC network filter as // this information is only available in :ref:`RBAC http filter `. message RBAC { enum EnforcementType { // Apply RBAC policies when the first byte of data arrives on the connection. ONE_TIME_ON_FIRST_BYTE = 0; // Continuously apply RBAC policies as data arrives. Use this mode when // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka, // etc. when the protocol decoders emit dynamic metadata such as the // resources being accessed and the operations on the resources. CONTINUOUS = 1; } // Specify the RBAC rules to be applied globally. // If absent, no enforcing RBAC policy will be applied. config.rbac.v2.RBAC rules = 1; // Shadow rules are not enforced by the filter but will emit stats and logs // and can be used for rule testing. // If absent, no shadow RBAC policy will be applied. 
config.rbac.v2.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. string stat_prefix = 3 [(validate.rules).string = {min_bytes: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. When used in // conjunction with filters that emit dynamic metadata after decoding // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to // CONTINUOUS to enforce RBAC policies on every message boundary. EnforcementType enforcement_type = 4; } ================================================ FILE: api/envoy/config/filter/network/redis_proxy/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/redis_proxy/v2/redis_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.redis_proxy.v2; import "envoy/api/v2/core/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.redis_proxy.v2"; option java_outer_classname = "RedisProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.redis_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Redis Proxy] // Redis Proxy :ref:`configuration 
overview `. // [#extension: envoy.filters.network.redis_proxy] // [#next-free-field: 7] message RedisProxy { // Redis connection pool settings. // [#next-free-field: 9] message ConnPoolSettings { // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data // because replication is asynchronous and requires some delay. You need to ensure that your // application can tolerate stale data. enum ReadPolicy { // Default mode. Read from the current primary node. MASTER = 0; // Read from the primary, but if it is unavailable, read from replica nodes. PREFER_MASTER = 1; // Read from replica nodes. If multiple replica nodes are present within a shard, a random // node is selected. Healthy nodes have precedence over unhealthy nodes. REPLICA = 2; // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not // present or unhealthy), read from the primary. PREFER_REPLICA = 3; // Read from any node of the cluster. A random node is selected among the primary and // replicas, healthy nodes have precedence over unhealthy nodes. ANY = 4; } // Per-operation timeout in milliseconds. The timer starts when the first // command of a pipeline is written to the backend connection. Each response received from Redis // resets the timer since it signifies that the next command is being processed by the backend. // The only exception to this behavior is when a connection to a backend is not yet established. // In that case, the connect timeout on the cluster will govern the timeout until the connection // is ready. google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be // forwarded to the same upstream. 
The hash key used for determining the upstream in a // consistent hash ring configuration will be computed from the hash tagged key instead of the // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster // implementation `_. // // Examples: // // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream bool enable_hashtagging = 2; // Accept `moved and ask redirection // `_ errors from upstream // redis servers, and retry commands to the specified target server. The target server does not // need to be known to the cluster manager. If the command cannot be redirected, then the // original error is passed downstream unchanged. By default, this support is not enabled. bool enable_redirection = 3; // Maximum size of encoded request buffer before flush is triggered and encoded requests // are sent upstream. If this is unset, the buffer flushes whenever it receives data // and performs no batching. // This feature makes it possible for multiple clients to send requests to Envoy and have // them batched- for example if one is running several worker processes, each with its own // Redis connection. There is no benefit to using this with a single downstream process. // Recommended size (if enabled) is 1024 bytes. uint32 max_buffer_size_before_flush = 4; // The encoded request buffer is flushed N milliseconds after the first request has been // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise, // the timer should be set according to the number of clients, overall request rate and // desired maximum latency for a single command. For example, if there are many requests // being batched together at a high rate, the buffer will likely be filled before the timer // fires. 
Alternatively, if the request rate is lower the buffer will not be filled as often // before the timer fires. // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter // defaults to 3ms. google.protobuf.Duration buffer_flush_timeout = 5; // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts // can be created at any given time by any given worker thread (see `enable_redirection` for // more details). If the host is unknown and a connection cannot be created due to enforcing // this limit, then redirection will fail and the original redirection error will be passed // downstream unchanged. This limit defaults to 100. google.protobuf.UInt32Value max_upstream_unknown_connections = 6; // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate // count. bool enable_command_stats = 8; // Read policy. The default is to read from the primary. ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } message PrefixRoutes { message Route { // The router is capable of shadowing traffic from one cluster to another. The current // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to // respond before returning the response from the primary cluster. All normal statistics are // collected for the shadow cluster making this feature useful for testing. message RequestMirrorPolicy { // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // If not specified or the runtime key is not present, all requests to the target cluster // will be mirrored. // // If specified, Envoy will lookup the runtime key to get the percentage of requests to the // mirror. 
api.v2.core.RuntimeFractionalPercent runtime_fraction = 2; // Set this to TRUE to only mirror write commands, this is effectively replicating the // writes in a "fire and forget" manner. bool exclude_read_commands = 3; } // String prefix that must match the beginning of the keys. Envoy will always favor the // longest match. string prefix = 1; // Indicates if the prefix needs to be removed from the key when forwarded. bool remove_prefix = 2; // Upstream cluster to forward the command to. string cluster = 3 [(validate.rules).string = {min_bytes: 1}]; // Indicates that the route has a request mirroring policy. repeated RequestMirrorPolicy request_mirror_policy = 4; } // List of prefix routes. repeated Route routes = 1; // Indicates that prefix matching should be case insensitive. bool case_insensitive = 2; // Optional catch-all route to forward commands that don't match any of the routes. The // catch-all route becomes required when no routes are specified. // .. attention:: // // This field is deprecated. Use a :ref:`catch_all // route` // instead. string catch_all_cluster = 3 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // Optional catch-all route to forward commands that don't match any of the routes. The // catch-all route becomes required when no routes are specified. Route catch_all_route = 4; } // The prefix to use when emitting :ref:`statistics `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // Name of cluster from cluster manager. See the :ref:`configuration section // ` of the architecture overview for recommendations on // configuring the backing cluster. // // .. attention:: // // This field is deprecated. Use a :ref:`catch_all // route` // instead. string cluster = 2 [deprecated = true, (envoy.annotations.disallowed_by_default) = true]; // Network settings for the connection pool to the upstream clusters. 
ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; // Indicates that latency stat should be computed in microseconds. By default it is computed in // milliseconds. bool latency_in_micros = 4; // List of **unique** prefixes used to separate keys from different workloads to different // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all // cluster can be used to forward commands when there is no match. Time complexity of the // lookups are in O(min(longest key prefix, key length)). // // Example: // // .. code-block:: yaml // // prefix_routes: // routes: // - prefix: "ab" // cluster: "cluster_a" // - prefix: "abc" // cluster: "cluster_b" // // When using the above routes, the following prefixes would be sent to: // // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b. // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a. // * ``get z:users`` would return a NoUpstreamHost error. A :ref:`catch-all // route` // would have retrieved the key from that cluster instead. // // See the :ref:`configuration section // ` of the architecture overview for recommendations on // configuring the backing clusters. PrefixRoutes prefix_routes = 5; // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis // AUTH command `_ with this password before enabling any other // command. If an AUTH command's password matches this password, an "OK" response will be returned // to the client. If the AUTH command password does not match this password, then an "ERR invalid // password" error will be returned. If any other command is received before AUTH when this // password is set, then a "NOAUTH Authentication required." error response will be sent to the // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no password is set" error will be returned. 
api.v2.core.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; } // RedisProtocolOptions specifies Redis upstream protocol options. This object is used in // :ref:`typed_extension_protocol_options`, // keyed by the name `envoy.filters.network.redis_proxy`. message RedisProtocolOptions { // Upstream server password as defined by the `requirepass` directive // `_ in the server's configuration file. api.v2.core.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; } ================================================ FILE: api/envoy/config/filter/network/sni_cluster/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/network/sni_cluster/v2/sni_cluster.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.sni_cluster.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.sni_cluster.v2"; option java_outer_classname = "SniClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.sni_cluster.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: SNI Cluster Filter] // Set the upstream cluster name from the SNI field in the TLS connection. // [#extension: envoy.filters.network.sni_cluster] message SniCluster { } ================================================ FILE: api/envoy/config/filter/network/tcp_proxy/v2/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.tcp_proxy.v2; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/config/filter/accesslog/v2/accesslog.proto"; import "envoy/type/hash_policy.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.tcp_proxy.v2"; option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.tcp_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.tcp_proxy] // [#next-free-field: 13] message TcpProxy { // [#not-implemented-hide:] Deprecated. // TCP Proxy filter configuration using V1 format. message DeprecatedV1 { option deprecated = true; // A TCP proxy route consists of a set of optional L4 criteria and the // name of a cluster. If a downstream connection matches all the // specified criteria, the cluster in the route is used for the // corresponding upstream connection. Routes are tried in the order // specified until a match is found. If no match is found, the connection // is closed. 
A route with no criteria is valid and always produces a // match. // [#next-free-field: 6] message TCPRoute { // The cluster to connect to when the downstream network connection // matches the specified criteria. string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // An optional list of IP address subnets in the form // “ip_address/xx”. The criteria is satisfied if the destination IP // address of the downstream connection is contained in at least one of // the specified subnets. If the parameter is not specified or the list // is empty, the destination IP address is ignored. The destination IP // address of the downstream connection might be different from the // addresses on which the proxy is listening if the connection has been // redirected. repeated api.v2.core.CidrRange destination_ip_list = 2; // An optional string containing a comma-separated list of port numbers // or ranges. The criteria is satisfied if the destination port of the // downstream connection is contained in at least one of the specified // ranges. If the parameter is not specified, the destination port is // ignored. The destination port address of the downstream connection // might be different from the port on which the proxy is listening if // the connection has been redirected. string destination_ports = 3; // An optional list of IP address subnets in the form // “ip_address/xx”. The criteria is satisfied if the source IP address // of the downstream connection is contained in at least one of the // specified subnets. If the parameter is not specified or the list is // empty, the source IP address is ignored. repeated api.v2.core.CidrRange source_ip_list = 4; // An optional string containing a comma-separated list of port numbers // or ranges. The criteria is satisfied if the source port of the // downstream connection is contained in at least one of the specified // ranges. If the parameter is not specified, the source port is // ignored. 
string source_ports = 5; } // The route table for the filter. All filter instances must have a route // table, even if it is empty. repeated TCPRoute routes = 1 [(validate.rules).repeated = {min_items: 1}]; } // Allows for specification of multiple upstream clusters along with weights // that indicate the percentage of traffic to be forwarded to each cluster. // The router selects an upstream cluster based on these weights. message WeightedCluster { message ClusterWeight { // Name of the upstream cluster. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the // clusters array determines the total weight. uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what is set in this field will be considered // for load balancing. Note that this will be merged with what's provided in // :ref:`TcpProxy.metadata_match // `, with values // here taking precedence. The filter name should be specified as *envoy.lb*. api.v2.core.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } // Configuration for tunneling TCP over other transports or application layers. // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will // remain the default. message TunnelingConfig { // The hostname to send in the synthesized CONNECT headers to the upstream proxy. string hostname = 1 [(validate.rules).string = {min_bytes: 1}]; } // The prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; oneof cluster_specifier { option (validate.required) = true; // The upstream cluster to connect to. 
string cluster = 2; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. WeightedCluster weighted_clusters = 10; } // Optional endpoint metadata match criteria. Only endpoints in the upstream // cluster with metadata matching that set in metadata_match will be // considered. The filter name should be specified as *envoy.lb*. api.v2.core.Metadata metadata_match = 9; // The idle timeout for connections managed by the TCP proxy filter. The idle timeout // is defined as the period in which there are no bytes sent or received on either // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set // to 0s, the timeout will be disabled. // // .. warning:: // Disabling this timeout has a highly likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. google.protobuf.Duration idle_timeout = 8; // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy // filter. The idle timeout is defined as the period in which there is no // active traffic. If not set, there is no idle timeout. When the idle timeout // is reached the connection will be closed. The distinction between // downstream_idle_timeout/upstream_idle_timeout provides a means to set // timeout based on the last byte sent on the downstream/upstream connection. google.protobuf.Duration downstream_idle_timeout = 3; // [#not-implemented-hide:] google.protobuf.Duration upstream_idle_timeout = 4; // Configuration for :ref:`access logs ` // emitted by the this tcp_proxy. repeated accesslog.v2.AccessLog access_log = 5; // [#not-implemented-hide:] Deprecated. DeprecatedV1 deprecated_v1 = 6 [deprecated = true]; // The maximum number of unsuccessful connection attempts that will be made before // giving up. If the parameter is not specified, 1 connection attempt will be made. 
google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based // load balancing algorithms will select a host randomly. Currently the number of hash policies is // limited to 1. repeated type.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; // [#not-implemented-hide:] feature in progress // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload // will be proxied upstream as per usual. TunnelingConfig tunneling_config = 12; } ================================================ FILE: api/envoy/config/filter/network/thrift_proxy/v2alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/api/v2/route:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/network/thrift_proxy/v2alpha1/README.md ================================================ Protocol buffer definitions for the Thrift proxy. 
================================================ FILE: api/envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.thrift_proxy.v2alpha1; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Thrift Proxy Route Configuration] // Thrift Proxy :ref:`configuration overview `. message RouteConfiguration { // The name of the route configuration. Reserved for future use in asynchronous route discovery. string name = 1; // The list of routes that will be matched, in order, against incoming requests. The first route // that matches will be used. repeated Route routes = 2; } message Route { // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. RouteAction route = 2 [(validate.rules).message = {required: true}]; } message RouteMatch { oneof match_specifier { option (validate.required) = true; // If specified, the route must exactly match the request method name. As a special case, an // empty string matches any request method name. string method_name = 1; // If specified, the route must have the service name as the request method name prefix. As a // special case, an empty string matches any service name. Only relevant when service // multiplexing. 
string service_name = 2; } // Inverts whatever matching is done in the :ref:`method_name // ` or // :ref:`service_name // ` fields. // Cannot be combined with wildcard matching as that would result in routes never being matched. // // .. note:: // // This does not invert matching done as part of the :ref:`headers field // ` field. To // invert header matching, see :ref:`invert_match // `. bool invert = 3; // Specifies a set of headers that the route should match on. The router will check the request’s // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). Note that this only applies for Thrift transports and/or // protocols that support headers. repeated api.v2.route.HeaderMatcher headers = 4; } // [#next-free-field: 7] message RouteAction { oneof cluster_specifier { option (validate.required) = true; // Indicates a single upstream cluster to which the request should be routed // to. string cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. WeightedCluster weighted_clusters = 2; // Envoy will determine the cluster to route to by reading the value of the // Thrift header named by cluster_header from the request headers. If the // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. string cluster_header = 6 [(validate.rules).string = {min_bytes: 1}]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field will be considered. 
// Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match // `, // with values there taking precedence. Keys and values should be provided under the "envoy.lb" // metadata key. api.v2.core.Metadata metadata_match = 3; // Specifies a set of rate limit configurations that could be applied to the route. // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders // action with the header name ":method-name". repeated api.v2.route.RateLimit rate_limits = 4; // Strip the service prefix from the method name, if there's a prefix. For // example, the method call Service:method would end up being just method. bool strip_service_name = 5; } // Allows for specification of multiple upstream clusters along with weights that indicate the // percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster // based on these weights. message WeightedCluster { message ClusterWeight { // Name of the upstream cluster. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. The sum of weights across all entries in the clusters array determines the total // weight. google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field, combined with what's // provided in :ref:`RouteAction's metadata_match // `, // will be considered. Values here will take precedence. Keys and values should be provided // under the "envoy.lb" metadata key. api.v2.core.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. 
repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/config/filter/network/thrift_proxy/v2alpha1/thrift_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.thrift_proxy.v2alpha1; import "envoy/config/filter/network/thrift_proxy/v2alpha1/route.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.thrift_proxy.v2alpha1"; option java_outer_classname = "ThriftProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.thrift_proxy] // Thrift transport types supported by Envoy. enum TransportType { // For downstream connections, the Thrift proxy will attempt to determine which transport to use. // For upstream connections, the Thrift proxy will use same transport as the downstream // connection. AUTO_TRANSPORT = 0; // The Thrift proxy will use the Thrift framed transport. FRAMED = 1; // The Thrift proxy will use the Thrift unframed transport. UNFRAMED = 2; // The Thrift proxy will assume the client is using the Thrift header transport. HEADER = 3; } // Thrift Protocol types supported by Envoy. enum ProtocolType { // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol // detection. For upstream connections, the Thrift proxy will use the same protocol as the // downstream connection. 
AUTO_PROTOCOL = 0; // The Thrift proxy will use the Thrift binary protocol. BINARY = 1; // The Thrift proxy will use Thrift non-strict binary protocol. LAX_BINARY = 2; // The Thrift proxy will use the Thrift compact protocol. COMPACT = 3; // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. TWITTER = 4; } // [#next-free-field: 6] message ThriftProxy { // Supplies the type of transport that the Thrift proxy should use. Defaults to // :ref:`AUTO_TRANSPORT`. TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; // Supplies the type of protocol that the Thrift proxy should use. Defaults to // :ref:`AUTO_PROTOCOL`. ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; // The human readable prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // The route table for the connection manager is static and is specified in this property. RouteConfiguration route_config = 4; // A list of individual Thrift filters that make up the filter chain for requests made to the // Thrift proxy. Order matters as the filters are processed sequentially. For backwards // compatibility, if no thrift_filters are specified, a default Thrift router filter // (`envoy.filters.thrift.router`) is used. repeated ThriftFilter thrift_filters = 5; } // ThriftFilter configures a Thrift filter. message ThriftFilter { // The name of the filter to instantiate. The name must match a supported // filter. The built-in filters are: // // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. 
oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } // ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in // in // :ref:`typed_extension_protocol_options`, // keyed by the name `envoy.filters.network.thrift_proxy`. message ThriftProtocolOptions { // Supplies the type of transport that the Thrift proxy should use for upstream connections. // Selecting // :ref:`AUTO_TRANSPORT`, // which is the default, causes the proxy to use the same transport as the downstream connection. TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; // Supplies the type of protocol that the Thrift proxy should use for upstream connections. // Selecting // :ref:`AUTO_PROTOCOL`, // which is the default, causes the proxy to use the same protocol as the downstream connection. ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/network/zookeeper_proxy/v1alpha1/zookeeper_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.network.zookeeper_proxy.v1alpha1; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.network.zookeeper_proxy.v1alpha1"; option java_outer_classname = "ZookeeperProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.zookeeper_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ZooKeeper proxy] // ZooKeeper Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.zookeeper_proxy] message ZooKeeperProxy { // The human readable prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs. // If the access log field is empty, access logs will not be written. string access_log = 2; // Messages — requests, responses and events — that are bigger than this value will // be ignored. If it is not set, the default value is 1Mb. // // The value here should match the jute.maxbuffer property in your cluster configuration: // // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options // // if that is set. If it isn't, ZooKeeper's default is also 1Mb. 
google.protobuf.UInt32Value max_packet_bytes = 3; } ================================================ FILE: api/envoy/config/filter/thrift/rate_limit/v2alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/ratelimit/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/filter/thrift/rate_limit/v2alpha1/rate_limit.proto ================================================ syntax = "proto3"; package envoy.config.filter.thrift.rate_limit.v2alpha1; import "envoy/config/ratelimit/v2/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.thrift.rate_limit.v2alpha1"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.thrift.ratelimit] // [#next-free-field: 6] message RateLimit { // The rate limit domain to use in the rate limit service request. string domain = 1 [(validate.rules).string = {min_bytes: 1}]; // Specifies the rate limit configuration stage. Each configured rate limit filter performs a // rate limit check using descriptors configured in the // :ref:`envoy_api_msg_config.filter.network.thrift_proxy.v2alpha1.RouteAction` for the request. // Only those entries with a matching stage number are used for a given filter. If not set, the // default stage number is 0. 
// // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. google.protobuf.Duration timeout = 3; // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. // Defaults to false. bool failure_mode_deny = 4; // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. ratelimit.v2.RateLimitServiceConfig rate_limit_service = 5 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/filter/thrift/router/v2alpha1/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/thrift/router/v2alpha1/router.proto ================================================ syntax = "proto3"; package envoy.config.filter.thrift.router.v2alpha1; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.filter.thrift.router.v2alpha1"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] // Thrift router :ref:`configuration overview `. 
// [#extension: envoy.filters.thrift.router] message Router { } ================================================ FILE: api/envoy/config/filter/udp/udp_proxy/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/filter/udp/udp_proxy/v2alpha/udp_proxy.proto ================================================ syntax = "proto3"; package envoy.config.filter.udp.udp_proxy.v2alpha; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.filter.udp.udp_proxy.v2alpha"; option java_outer_classname = "UdpProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.filters.udp.udp_proxy.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: UDP proxy] // UDP proxy :ref:`configuration overview `. // [#extension: envoy.filters.udp_listener.udp_proxy] // Configuration for the UDP proxy filter. message UdpProxyConfig { // The stat prefix used when emitting UDP proxy filter stats. string stat_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; oneof route_specifier { option (validate.required) = true; // The upstream cluster to connect to. string cluster = 2 [(validate.rules).string = {min_bytes: 1}]; } // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by // the session. The default if not specified is 1 minute. 
google.protobuf.Duration idle_timeout = 3; } ================================================ FILE: api/envoy/config/grpc_credential/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/grpc_credential/v2alpha/aws_iam.proto ================================================ syntax = "proto3"; package envoy.config.grpc_credential.v2alpha; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_outer_classname = "AwsIamProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Grpc Credentials AWS IAM] // Configuration for AWS IAM Grpc Credentials Plugin // [#extension: envoy.grpc_credentials.aws_iam] message AwsIamConfig { // The `service namespace // `_ // of the Grpc endpoint. // // Example: appmesh string service_name = 1 [(validate.rules).string = {min_bytes: 1}]; // The `region `_ hosting the Grpc // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment // variable. 
// // Example: us-west-2 string region = 2; } ================================================ FILE: api/envoy/config/grpc_credential/v2alpha/file_based_metadata.proto ================================================ syntax = "proto3"; package envoy.config.grpc_credential.v2alpha; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.grpc_credential.v2alpha"; option java_outer_classname = "FileBasedMetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Grpc Credentials File Based Metadata] // Configuration for File Based Metadata Grpc Credentials Plugin // [#extension: envoy.grpc_credentials.file_based_metadata] message FileBasedMetadataConfig { // Location or inline data of secret to use for authentication of the Google gRPC connection // this secret will be attached to a header of the gRPC connection api.v2.core.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true]; // Metadata header key to use for sending the secret data // if no header key is set, "authorization" header will be used string header_key = 2; // Prefix to prepend to the secret in the metadata header // if no prefix is set, the default is to use no prefix string header_prefix = 3; } ================================================ FILE: api/envoy/config/grpc_credential/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/config/grpc_credential/v2alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/config/grpc_credential/v3/aws_iam.proto
================================================
syntax = "proto3";

package envoy.config.grpc_credential.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3";
option java_outer_classname = "AwsIamProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Grpc Credentials AWS IAM]
// Configuration for AWS IAM Grpc Credentials Plugin
// [#extension: envoy.grpc_credentials.aws_iam]

message AwsIamConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.grpc_credential.v2alpha.AwsIamConfig";

  // The `service namespace
  // <https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html>`_
  // of the Grpc endpoint.
  //
  // Example: appmesh
  string service_name = 1 [(validate.rules).string = {min_len: 1}];

  // The `region <https://docs.aws.amazon.com/general/latest/gr/rande.html>`_ hosting the Grpc
  // endpoint. If unspecified, the extension will use the value in the ``AWS_REGION`` environment
  // variable.
  //
  // Example: us-west-2
  string region = 2;
}

================================================
FILE: api/envoy/config/grpc_credential/v3/file_based_metadata.proto
================================================
syntax = "proto3";

package envoy.config.grpc_credential.v3;

import "envoy/config/core/v3/base.proto";
import "udpa/annotations/sensitive.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.config.grpc_credential.v3";
option java_outer_classname = "FileBasedMetadataProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Grpc Credentials File Based Metadata]
// Configuration for File Based Metadata Grpc Credentials Plugin
// [#extension: envoy.grpc_credentials.file_based_metadata]

message FileBasedMetadataConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.grpc_credential.v2alpha.FileBasedMetadataConfig";

  // Location or inline data of secret to use for authentication of the Google gRPC connection
  // this secret will be attached to a header of the gRPC connection
  core.v3.DataSource secret_data = 1 [(udpa.annotations.sensitive) = true];

  // Metadata header key to use for sending the secret data
  // if no header key is set, "authorization" header will be used
  string header_key = 2;

  // Prefix to prepend to the secret in the metadata header
  // if no prefix is set, the default is to use no prefix
  string header_prefix = 3;
}

================================================
FILE: api/envoy/config/health_checker/redis/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/config/health_checker/redis/v2/redis.proto
================================================
syntax = "proto3";

package envoy.config.health_checker.redis.v2;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.config.health_checker.redis.v2";
option java_outer_classname = "RedisProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Redis]
// Redis health checker :ref:`configuration overview <config_health_checkers_redis>`.
// [#extension: envoy.health_checkers.redis]

message Redis {
  // If set, optionally perform ``EXISTS <key>`` instead of ``PING``. A return value
  // from Redis of 0 (does not exist) is considered a passing healthcheck. A return value other
  // than 0 is considered a failure. This allows the user to mark a Redis instance for maintenance
  // by setting the specified key to any value and waiting for traffic to drain.
  string key = 1;
}

================================================
FILE: api/envoy/config/listener/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/listener/v2/api_listener.proto ================================================ syntax = "proto3"; package envoy.config.listener.v2; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v2"; option java_outer_classname = "ApiListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.listener.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: API listener] // Describes a type of API listener, which is used in non-proxy clients. The type of API // exposed to the non-proxy application depends on the type of API listener. message ApiListener { // The type in this field determines the type of API listener. At present, the following // types are supported: // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the // specific config message for each type of API listener. We could not do this in v2 because // it would have caused circular dependencies for go protos: lds.proto depends on this file, // and http_connection_manager.proto depends on rds.proto, which is in the same directory as // lds.proto, so lds.proto cannot depend on this file.] google.protobuf.Any api_listener = 1; } ================================================ FILE: api/envoy/config/listener/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2:pkg", "//envoy/api/v2/listener:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/listener/v2:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/config/listener/v3/api_listener.proto ================================================ syntax = "proto3"; package envoy.config.listener.v3; import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ApiListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: API listener] // Describes a type of API listener, which is used in non-proxy clients. The type of API // exposed to the non-proxy application depends on the type of API listener. message ApiListener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v2.ApiListener"; // The type in this field determines the type of API listener. At present, the following // types are supported: // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the // specific config message for each type of API listener. We could not do this in v2 because // it would have caused circular dependencies for go protos: lds.proto depends on this file, // and http_connection_manager.proto depends on rds.proto, which is in the same directory as // lds.proto, so lds.proto cannot depend on this file.] 
google.protobuf.Any api_listener = 1; } ================================================ FILE: api/envoy/config/listener/v3/listener.proto ================================================ syntax = "proto3"; package envoy.config.listener.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/socket_option.proto"; import "envoy/config/listener/v3/api_listener.proto"; import "envoy/config/listener/v3/listener_components.proto"; import "envoy/config/listener/v3/udp_listener_config.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/core/v1/collection_entry.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` // Listener list collections. Entries are *Listener* resources or references. // [#not-implemented-hide:] message ListenerCollection { repeated udpa.core.v1.CollectionEntry entries = 1; } // [#next-free-field: 25] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener"; enum DrainType { // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check // filter), listener removal/modification, and hot restart. DEFAULT = 0; // Drain in response to listener removal/modification and hot restart. This setting does not // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress // and egress listeners. 
MODIFY_ONLY = 1; } // [#not-implemented-hide:] message DeprecatedV1 { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener.DeprecatedV1"; // Whether the listener should bind to the port. A listener that doesn't // bind can only receive connections redirected from other listeners that // set use_original_dst parameter to true. Default is true. // // This is deprecated in v2, all Listeners will bind to their port. An // additional filter chain must be created for every original destination // port this listener may redirect to in v2, with the original port // specified in the FilterChainMatch destination_port field. // // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] google.protobuf.BoolValue bind_to_port = 1; } // Configuration for listener connection balancing. message ConnectionBalanceConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener.ConnectionBalanceConfig"; // A connection balancer implementation that does exact balancing. This means that a lock is // held during balancing so that connection counts are nearly exactly balanced between worker // threads. This is "nearly" exact in the sense that a connection might close in parallel thus // making the counts incorrect, but this should be rectified on the next accept. This balancer // sacrifices accept throughput for accuracy and should be used when there are a small number of // connections that rarely cycle (e.g., service mesh gRPC egress). message ExactBalance { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Listener.ConnectionBalanceConfig.ExactBalance"; } oneof balance_type { option (validate.required) = true; // If specified, the listener will use the exact connection balancer. ExactBalance exact_balance = 1; } } reserved 14, 4; reserved "use_original_dst"; // The unique name by which this listener is known. 
If no name is provided,
  // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically
  // updated or removed via :ref:`LDS <config_listeners_lds>` a unique name must be provided.
  string name = 1;

  // The address that the listener should listen on. In general, the address must be unique, though
  // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on
  // Linux as the actual port will be allocated by the OS.
  core.v3.Address address = 2 [(validate.rules).message = {required: true}];

  // A list of filter chains to consider for this listener. The
  // :ref:`FilterChain <envoy_api_msg_config.listener.v3.FilterChain>` with the most specific
  // :ref:`FilterChainMatch <envoy_api_msg_config.listener.v3.FilterChainMatch>` criteria is used on a
  // connection.
  //
  // Example using SNI for filter chain selection can be found in the
  // :ref:`FAQ entry <faq_how_to_setup_sni>`.
  repeated FilterChain filter_chains = 3;

  // Soft limit on size of the listener’s new connection read and write buffers.
  // If unspecified, an implementation defined default is applied (1MiB).
  google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5
      [(udpa.annotations.security).configure_for_untrusted_downstream = true];

  // Listener metadata.
  core.v3.Metadata metadata = 6;

  // [#not-implemented-hide:]
  DeprecatedV1 deprecated_v1 = 7;

  // The type of draining to perform at a listener-wide level.
  DrainType drain_type = 8;

  // Listener filters have the opportunity to manipulate and augment the connection metadata that
  // is used in connection filter chain matching, for example. These filters are run before any in
  // :ref:`filter_chains <envoy_api_field_config.listener.v3.Listener.filter_chains>`. Order matters as the
  // filters are processed sequentially right after a socket has been accepted by the listener, and
  // before a connection is created.
  // UDP Listener filters can be specified when the protocol in the listener socket address in
  // :ref:`protocol <envoy_api_field_config.core.v3.SocketAddress.protocol>` is :ref:`UDP
  // <envoy_api_enum_value_config.core.v3.SocketAddress.Protocol.UDP>`.
  // UDP listeners currently support a single filter.
repeated ListenerFilter listener_filters = 9; // The timeout to wait for all listener filters to complete operation. If the timeout is reached, // the accepted socket is closed without a connection being created unless // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the // timeout. If not specified, a default timeout of 15s is used. google.protobuf.Duration listener_filters_timeout = 15; // Whether a connection should be created when listener filters timeout. Default is false. // // .. attention:: // // Some listener filters, such as :ref:`Proxy Protocol filter // `, should not be used with this option. It will cause // unexpected behavior when a connection is created. bool continue_on_listener_filters_timeout = 17; // Whether the listener should be set as a transparent socket. // When this flag is set to true, connections can be redirected to the listener using an // *iptables* *TPROXY* target, in which case the original source and destination addresses and // ports are preserved on accepted connections. This flag should be used in combination with // :ref:`an original_dst ` :ref:`listener filter // ` to mark the connections' local addresses as // "restored." This can be used to hand off each redirected connection to another listener // associated with the connection's destination address. Direct connections to the socket without // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are // therefore treated as if they were redirected. // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. // Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. // When this flag is not set (default), the socket is not modified, i.e. the transparent option // is neither set nor reset. google.protobuf.BoolValue transparent = 10; // Whether the listener should set the *IP_FREEBIND* socket option. 
When this // flag is set to true, listeners can be bound to an IP address that is not // configured on the system running Envoy. When this flag is set to false, the // option *IP_FREEBIND* is disabled on the socket. When this flag is not set // (default), the socket is not modified, i.e. the option is neither enabled // nor disabled. google.protobuf.BoolValue freebind = 11; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. repeated core.v3.SocketOption socket_options = 13; // Whether the listener should accept TCP Fast Open (TFO) connections. // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on // the socket, with a queue length of the specified size // (see `details in RFC7413 `_). // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. // When this flag is not set (default), the socket is not modified, // i.e. the option is neither enabled nor disabled. // // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable // TCP_FASTOPEN. // See `ip-sysctl.txt `_. // // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; // Specifies the intended direction of the traffic relative to the local Envoy. core.v3.TrafficDirection traffic_direction = 16; // If the protocol in the listener socket address in :ref:`protocol // ` is :ref:`UDP // `, this field specifies the actual udp // listener to create, i.e. :ref:`udp_listener_name // ` = "raw_udp_listener" for // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". UdpListenerConfig udp_listener_config = 18; // Used to represent an API listener, which is used in non-proxy clients. 
The type of API // exposed to the non-proxy application depends on the type of API listener. // When this field is set, no other field except for :ref:`name` // should be set. // // .. note:: // // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, // not LDS. // // [#next-major-version: In the v3 API, instead of this messy approach where the socket // listener fields are directly in the top-level Listener message and the API listener types // are in the ApiListener message, the socket listener messages should be in their own message, // and the top-level Listener should essentially be a oneof that selects between the // socket listener and the various types of API listener. That way, a given Listener message // can structurally only contain the fields of the relevant type.] ApiListener api_listener = 19; // The listener's connection balancer configuration, currently only applicable to TCP listeners. // If no configuration is specified, Envoy will not attempt to balance active connections between // worker threads. ConnectionBalanceConfig connection_balance_config = 20; // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and // create one socket for each worker thread. This makes inbound connections // distribute among worker threads roughly evenly in cases where there are a high number // of connections. When this flag is set to false, all worker threads share one socket. // // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart // (see `3rd paragraph in 'soreuseport' commit message // `_). // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket // `_. bool reuse_port = 21; // Configuration for :ref:`access logs ` // emitted by this listener. 
repeated accesslog.v3.AccessLog access_log = 22; // If the protocol in the listener socket address in :ref:`protocol // ` is :ref:`UDP // `, this field specifies the actual udp // writer to create, i.e. :ref:`name ` // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. // If not present, treat it as "udp_default_writer". // [#not-implemented-hide:] core.v3.TypedExtensionConfig udp_writer_config = 23; // The maximum length a tcp listener's pending connections queue can grow to. If no value is // provided net.core.somaxconn will be used on Linux and 128 otherwise. google.protobuf.UInt32Value tcp_backlog_size = 24; } ================================================ FILE: api/envoy/config/listener/v3/listener_components.proto ================================================ syntax = "proto3"; package envoy.config.listener.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/range.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "ListenerComponentsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Listener components] // Listener :ref:`configuration overview ` message Filter { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.Filter"; reserved 3, 2; reserved "config"; // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. 
string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 4; } } // Specifies the match criteria for selecting a specific filter chain for a // listener. // // In order for a filter chain to be selected, *ALL* of its criteria must be // fulfilled by the incoming connection, properties of which are set by the // networking stack and/or listener filters. // // The following order applies: // // 1. Destination port. // 2. Destination IP address. // 3. Server name (e.g. SNI for TLS protocol), // 4. Transport protocol. // 5. Application protocols (e.g. ALPN for TLS protocol). // 6. Source type (e.g. any, local or external network). // 7. Source IP address. // 8. Source port. // // For criteria that allow ranges or wildcards, the most specific value in any // of the configured filter chains that matches the incoming connection is going // to be used (e.g. for SNI ``www.example.com`` the most specific match would be // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. // // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] // [#next-free-field: 13] message FilterChainMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChainMatch"; enum ConnectionSourceType { // Any connection source matches. ANY = 0; // Match a connection originating from the same host. SAME_IP_OR_LOOPBACK = 1; // Match a connection originating from a different host. 
EXTERNAL = 2; } reserved 1; // Optional destination port to consider when use_original_dst is set on the // listener in determining a filter chain match. google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; // If non-empty, an IP address and prefix length to match addresses when the // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. repeated core.v3.CidrRange prefix_ranges = 3; // If non-empty, an IP address and suffix length to match addresses when the // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. // [#not-implemented-hide:] string address_suffix = 4; // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; // Specifies the connection source IP match type. Can be any, local or external network. ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; // The criteria is satisfied if the source IP address of the downstream // connection is contained in at least one of the specified subnets. If the // parameter is not specified or the list is empty, the source IP address is // ignored. repeated core.v3.CidrRange source_prefix_ranges = 6; // The criteria is satisfied if the source port of the downstream connection // is contained in at least one of the specified ports. If the parameter is // not specified, the source port is ignored. repeated uint32 source_ports = 7 [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining // a filter chain match. Those values will be compared against the server names of a new // connection, when detected by one of the listener filters. // // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. 
// // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid. // // .. attention:: // // See the :ref:`FAQ entry ` on how to configure SNI for more // information. repeated string server_names = 11; // If non-empty, a transport protocol to consider when determining a filter chain match. // This value will be compared against the transport protocol of a new connection, when // it's detected by one of the listener filters. // // Suggested values include: // // * ``raw_buffer`` - default, used when no transport protocol is detected, // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector ` // when TLS protocol is detected. string transport_protocol = 9; // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when // determining a filter chain match. Those values will be compared against the application // protocols of a new connection, when detected by one of the listener filters. // // Suggested values include: // // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector // `, // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector ` // // .. attention:: // // Currently, only :ref:`TLS Inspector ` provides // application protocol detection based on the requested // `ALPN `_ values. // // However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet, // and matching on values other than ``h2`` is going to lead to a lot of false negatives, // unless all connecting clients are known to use ALPN. repeated string application_protocols = 10; } // A filter chain wraps a set of match criteria, an option TLS context, a set of filters, and // various other parameters. // [#next-free-field: 9] message FilterChain { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.FilterChain"; // The configuration for on-demand filter chain. If this field is not empty in FilterChain message, // a filter chain will be built on-demand. 
// On-demand filter chains help speedup the warming up of listeners since the building and initialization of // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain. // Filter chains that are not often used can be set as on-demand. message OnDemandConfiguration { // The timeout to wait for filter chain placeholders to complete rebuilding. // 1. If this field is set to 0, timeout is disabled. // 2. If not specified, a default timeout of 15s is used. // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached. // Upon failure or timeout, all connections related to this filter chain will be closed. // Rebuilding will start again on the next new connection. google.protobuf.Duration rebuild_timeout = 1; } reserved 2; reserved "tls_context"; // The criteria to use when matching a connection to this filter chain. FilterChainMatch filter_chain_match = 1; // A list of individual network filters that make up the filter chain for // connections established with the listener. Order matters as the filters are // processed sequentially as connection events happen. Note: If the filter // list is empty, the connection will close by default. repeated Filter filters = 3; // Whether the listener should expect a PROXY protocol V1 header on new // connections. If this option is enabled, the listener will assume that that // remote address of the connection is the one specified in the header. Some // load balancers including the AWS ELB support this option. If the option is // absent or set to false, Envoy will use the physical peer address of the // connection as the remote address. google.protobuf.BoolValue use_proxy_proto = 4; // [#not-implemented-hide:] filter chain metadata. core.v3.Metadata metadata = 5; // Optional custom transport socket implementation to use for downstream connections. 
  // To set up TLS, set a transport socket with name `tls` and
  // :ref:`DownstreamTlsContext ` in the `typed_config`.
  // If no transport socket configuration is specified, new connections
  // will be set up with plaintext.
  core.v3.TransportSocket transport_socket = 6;

  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no
  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter
  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.
  string name = 7;

  // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand.
  // If this field is not empty, the filter chain will be built on-demand.
  // Otherwise, the filter chain will be built normally and block listener warming.
  OnDemandConfiguration on_demand_configuration = 8;
}

// Listener filter chain match configuration. This is a recursive structure which allows complex
// nested match configurations to be built using various logical operators.
//
// Examples:
//
// * Matches if the destination port is 3306.
//
// .. code-block:: yaml
//
//  destination_port_range:
//   start: 3306
//   end: 3307
//
// * Matches if the destination port is 3306 or 15000.
//
// .. code-block:: yaml
//
//  or_match:
//   rules:
//    - destination_port_range:
//       start: 3306
//       end: 3307
//    - destination_port_range:
//       start: 15000
//       end: 15001
//
// [#next-free-field: 6]
message ListenerFilterChainMatchPredicate {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.listener.ListenerFilterChainMatchPredicate";

  // A set of match configurations used for logical operations.
  message MatchSet {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.listener.ListenerFilterChainMatchPredicate.MatchSet";

    // The list of rules that make up the set.
repeated ListenerFilterChainMatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; } oneof rule { option (validate.required) = true; // A set that describes a logical OR. If any member of the set matches, the match configuration // matches. MatchSet or_match = 1; // A set that describes a logical AND. If all members of the set match, the match configuration // matches. MatchSet and_match = 2; // A negation match. The match configuration will match if the negated match condition matches. ListenerFilterChainMatchPredicate not_match = 3; // The match configuration will always match. bool any_match = 4 [(validate.rules).bool = {const: true}]; // Match destination port. Particularly, the match evaluation must use the recovered local port if // the owning listener filter is after :ref:`an original_dst listener filter `. type.v3.Int32Range destination_port_range = 5; } } message ListenerFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.ListenerFilter"; reserved 2; reserved "config"; // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 3; } // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. // See :ref:`ListenerFilterChainMatchPredicate ` // for further examples. 
ListenerFilterChainMatchPredicate filter_disabled = 4; } ================================================ FILE: api/envoy/config/listener/v3/quic_config.proto ================================================ syntax = "proto3"; package envoy.config.listener.v3; import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "QuicConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: QUIC listener Config] // Configuration specific to the QUIC protocol. // Next id: 5 message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.QuicProtocolOptions"; // Maximum number of streams that the client can negotiate per connection. 100 // if not specified. google.protobuf.UInt32Value max_concurrent_streams = 1; // Maximum number of milliseconds that connection will be alive when there is // no network activity. 300000ms if not specified. google.protobuf.Duration idle_timeout = 2; // Connection timeout in milliseconds before the crypto handshake is finished. // 20000ms if not specified. google.protobuf.Duration crypto_handshake_timeout = 3; // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults // to enabled. 
core.v3.RuntimeFeatureFlag enabled = 4; } ================================================ FILE: api/envoy/config/listener/v3/udp_default_writer_config.proto ================================================ syntax = "proto3"; package envoy.config.listener.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "UdpDefaultWriterConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Udp Default Writer Config] // [#not-implemented-hide:] // Configuration specific to the Udp Default Writer. message UdpDefaultWriterOptions { } ================================================ FILE: api/envoy/config/listener/v3/udp_gso_batch_writer_config.proto ================================================ syntax = "proto3"; package envoy.config.listener.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "UdpGsoBatchWriterConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Udp Gso Batch Writer Config] // [#not-implemented-hide:] // Configuration specific to the Udp Gso Batch Writer. 
message UdpGsoBatchWriterOptions { } ================================================ FILE: api/envoy/config/listener/v3/udp_listener_config.proto ================================================ syntax = "proto3"; package envoy.config.listener.v3; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v3"; option java_outer_classname = "UdpListenerConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: UDP Listener Config] // Listener :ref:`configuration overview ` message UdpListenerConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.UdpListenerConfig"; reserved 2; reserved "config"; // Used to look up UDP listener factory, matches "raw_udp_listener" or // "quic_listener" to create a specific udp listener. // If not specified, treat as "raw_udp_listener". string udp_listener_name = 1; // Used to create a specific listener factory. To some factory, e.g. // "raw_udp_listener", config is not needed. oneof config_type { google.protobuf.Any typed_config = 3; } } message ActiveRawUdpListenerConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.listener.ActiveRawUdpListenerConfig"; } ================================================ FILE: api/envoy/config/listener/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/listener/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/config/listener/v4alpha/api_listener.proto ================================================ syntax = "proto3"; package envoy.config.listener.v4alpha; import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; option java_outer_classname = "ApiListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: API listener] // Describes a type of API listener, which is used in non-proxy clients. The type of API // exposed to the non-proxy application depends on the type of API listener. message ApiListener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.ApiListener"; // The type in this field determines the type of API listener. At present, the following // types are supported: // envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager (HTTP) // [#next-major-version: In the v3 API, replace this Any field with a oneof containing the // specific config message for each type of API listener. We could not do this in v2 because // it would have caused circular dependencies for go protos: lds.proto depends on this file, // and http_connection_manager.proto depends on rds.proto, which is in the same directory as // lds.proto, so lds.proto cannot depend on this file.] 
google.protobuf.Any api_listener = 1; } ================================================ FILE: api/envoy/config/listener/v4alpha/listener.proto ================================================ syntax = "proto3"; package envoy.config.listener.v4alpha; import "envoy/config/accesslog/v4alpha/accesslog.proto"; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/socket_option.proto"; import "envoy/config/listener/v4alpha/api_listener.proto"; import "envoy/config/listener/v4alpha/listener_components.proto"; import "envoy/config/listener/v4alpha/udp_listener_config.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/core/v1/collection_entry.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; option java_outer_classname = "ListenerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Listener configuration] // Listener :ref:`configuration overview ` // Listener list collections. Entries are *Listener* resources or references. // [#not-implemented-hide:] message ListenerCollection { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.ListenerCollection"; repeated udpa.core.v1.CollectionEntry entries = 1; } // [#next-free-field: 25] message Listener { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener"; enum DrainType { // Drain in response to calling /healthcheck/fail admin endpoint (along with the health check // filter), listener removal/modification, and hot restart. 
DEFAULT = 0; // Drain in response to listener removal/modification and hot restart. This setting does not // include /healthcheck/fail. This setting may be desirable if Envoy is hosting both ingress // and egress listeners. MODIFY_ONLY = 1; } // [#not-implemented-hide:] message DeprecatedV1 { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener.DeprecatedV1"; // Whether the listener should bind to the port. A listener that doesn't // bind can only receive connections redirected from other listeners that // set use_original_dst parameter to true. Default is true. // // This is deprecated in v2, all Listeners will bind to their port. An // additional filter chain must be created for every original destination // port this listener may redirect to in v2, with the original port // specified in the FilterChainMatch destination_port field. // // [#comment:TODO(PiotrSikora): Remove this once verified that we no longer need it.] google.protobuf.BoolValue bind_to_port = 1; } // Configuration for listener connection balancing. message ConnectionBalanceConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener.ConnectionBalanceConfig"; // A connection balancer implementation that does exact balancing. This means that a lock is // held during balancing so that connection counts are nearly exactly balanced between worker // threads. This is "nearly" exact in the sense that a connection might close in parallel thus // making the counts incorrect, but this should be rectified on the next accept. This balancer // sacrifices accept throughput for accuracy and should be used when there are a small number of // connections that rarely cycle (e.g., service mesh gRPC egress). 
message ExactBalance { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Listener.ConnectionBalanceConfig.ExactBalance"; } oneof balance_type { option (validate.required) = true; // If specified, the listener will use the exact connection balancer. ExactBalance exact_balance = 1; } } reserved 14, 4; reserved "use_original_dst"; // The unique name by which this listener is known. If no name is provided, // Envoy will allocate an internal UUID for the listener. If the listener is to be dynamically // updated or removed via :ref:`LDS ` a unique name must be provided. string name = 1; // The address that the listener should listen on. In general, the address must be unique, though // that is governed by the bind rules of the OS. E.g., multiple listeners can listen on port 0 on // Linux as the actual port will be allocated by the OS. core.v4alpha.Address address = 2 [(validate.rules).message = {required: true}]; // A list of filter chains to consider for this listener. The // :ref:`FilterChain ` with the most specific // :ref:`FilterChainMatch ` criteria is used on a // connection. // // Example using SNI for filter chain selection can be found in the // :ref:`FAQ entry `. repeated FilterChain filter_chains = 3; // Soft limit on size of the listener’s new connection read and write buffers. // If unspecified, an implementation defined default is applied (1MiB). google.protobuf.UInt32Value per_connection_buffer_limit_bytes = 5 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Listener metadata. core.v4alpha.Metadata metadata = 6; // [#not-implemented-hide:] DeprecatedV1 deprecated_v1 = 7; // The type of draining to perform at a listener-wide level. DrainType drain_type = 8; // Listener filters have the opportunity to manipulate and augment the connection metadata that // is used in connection filter chain matching, for example. These filters are run before any in // :ref:`filter_chains `. 
Order matters as the // filters are processed sequentially right after a socket has been accepted by the listener, and // before a connection is created. // UDP Listener filters can be specified when the protocol in the listener socket address in // :ref:`protocol ` is :ref:`UDP // `. // UDP listeners currently support a single filter. repeated ListenerFilter listener_filters = 9; // The timeout to wait for all listener filters to complete operation. If the timeout is reached, // the accepted socket is closed without a connection being created unless // `continue_on_listener_filters_timeout` is set to true. Specify 0 to disable the // timeout. If not specified, a default timeout of 15s is used. google.protobuf.Duration listener_filters_timeout = 15; // Whether a connection should be created when listener filters timeout. Default is false. // // .. attention:: // // Some listener filters, such as :ref:`Proxy Protocol filter // `, should not be used with this option. It will cause // unexpected behavior when a connection is created. bool continue_on_listener_filters_timeout = 17; // Whether the listener should be set as a transparent socket. // When this flag is set to true, connections can be redirected to the listener using an // *iptables* *TPROXY* target, in which case the original source and destination addresses and // ports are preserved on accepted connections. This flag should be used in combination with // :ref:`an original_dst ` :ref:`listener filter // ` to mark the connections' local addresses as // "restored." This can be used to hand off each redirected connection to another listener // associated with the connection's destination address. Direct connections to the socket without // using *TPROXY* cannot be distinguished from connections redirected using *TPROXY* and are // therefore treated as if they were redirected. // When this flag is set to false, the listener's socket is explicitly reset as non-transparent. 
// Setting this flag requires Envoy to run with the *CAP_NET_ADMIN* capability. // When this flag is not set (default), the socket is not modified, i.e. the transparent option // is neither set nor reset. google.protobuf.BoolValue transparent = 10; // Whether the listener should set the *IP_FREEBIND* socket option. When this // flag is set to true, listeners can be bound to an IP address that is not // configured on the system running Envoy. When this flag is set to false, the // option *IP_FREEBIND* is disabled on the socket. When this flag is not set // (default), the socket is not modified, i.e. the option is neither enabled // nor disabled. google.protobuf.BoolValue freebind = 11; // Additional socket options that may not be present in Envoy source code or // precompiled binaries. repeated core.v4alpha.SocketOption socket_options = 13; // Whether the listener should accept TCP Fast Open (TFO) connections. // When this flag is set to a value greater than 0, the option TCP_FASTOPEN is enabled on // the socket, with a queue length of the specified size // (see `details in RFC7413 `_). // When this flag is set to 0, the option TCP_FASTOPEN is disabled on the socket. // When this flag is not set (default), the socket is not modified, // i.e. the option is neither enabled nor disabled. // // On Linux, the net.ipv4.tcp_fastopen kernel parameter must include flag 0x2 to enable // TCP_FASTOPEN. // See `ip-sysctl.txt `_. // // On macOS, only values of 0, 1, and unset are valid; other values may result in an error. // To set the queue length on macOS, set the net.inet.tcp.fastopen_backlog kernel parameter. google.protobuf.UInt32Value tcp_fast_open_queue_length = 12; // Specifies the intended direction of the traffic relative to the local Envoy. core.v4alpha.TrafficDirection traffic_direction = 16; // If the protocol in the listener socket address in :ref:`protocol // ` is :ref:`UDP // `, this field specifies the actual udp // listener to create, i.e. 
:ref:`udp_listener_name // ` = "raw_udp_listener" for // creating a packet-oriented UDP listener. If not present, treat it as "raw_udp_listener". UdpListenerConfig udp_listener_config = 18; // Used to represent an API listener, which is used in non-proxy clients. The type of API // exposed to the non-proxy application depends on the type of API listener. // When this field is set, no other field except for :ref:`name` // should be set. // // .. note:: // // Currently only one ApiListener can be installed; and it can only be done via bootstrap config, // not LDS. // // [#next-major-version: In the v3 API, instead of this messy approach where the socket // listener fields are directly in the top-level Listener message and the API listener types // are in the ApiListener message, the socket listener messages should be in their own message, // and the top-level Listener should essentially be a oneof that selects between the // socket listener and the various types of API listener. That way, a given Listener message // can structurally only contain the fields of the relevant type.] ApiListener api_listener = 19; // The listener's connection balancer configuration, currently only applicable to TCP listeners. // If no configuration is specified, Envoy will not attempt to balance active connections between // worker threads. ConnectionBalanceConfig connection_balance_config = 20; // When this flag is set to true, listeners set the *SO_REUSEPORT* socket option and // create one socket for each worker thread. This makes inbound connections // distribute among worker threads roughly evenly in cases where there are a high number // of connections. When this flag is set to false, all worker threads share one socket. // // Before Linux v4.19-rc1, new TCP connections may be rejected during hot restart // (see `3rd paragraph in 'soreuseport' commit message // `_). // This issue was fixed by `tcp: Avoid TCP syncookie rejected by SO_REUSEPORT socket // `_. 
bool reuse_port = 21; // Configuration for :ref:`access logs ` // emitted by this listener. repeated accesslog.v4alpha.AccessLog access_log = 22; // If the protocol in the listener socket address in :ref:`protocol // ` is :ref:`UDP // `, this field specifies the actual udp // writer to create, i.e. :ref:`name ` // = "udp_default_writer" for creating a udp writer with writing in passthrough mode, // = "udp_gso_batch_writer" for creating a udp writer with writing in batch mode. // If not present, treat it as "udp_default_writer". // [#not-implemented-hide:] core.v4alpha.TypedExtensionConfig udp_writer_config = 23; // The maximum length a tcp listener's pending connections queue can grow to. If no value is // provided net.core.somaxconn will be used on Linux and 128 otherwise. google.protobuf.UInt32Value tcp_backlog_size = 24; } ================================================ FILE: api/envoy/config/listener/v4alpha/listener_components.proto ================================================ syntax = "proto3"; package envoy.config.listener.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/type/v3/range.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; option java_outer_classname = "ListenerComponentsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Listener components] // Listener :ref:`configuration overview ` message Filter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.Filter"; reserved 3, 2; reserved "config"; // The name of the filter to instantiate. 
The name must match a // :ref:`supported filter `. string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 4; } } // Specifies the match criteria for selecting a specific filter chain for a // listener. // // In order for a filter chain to be selected, *ALL* of its criteria must be // fulfilled by the incoming connection, properties of which are set by the // networking stack and/or listener filters. // // The following order applies: // // 1. Destination port. // 2. Destination IP address. // 3. Server name (e.g. SNI for TLS protocol), // 4. Transport protocol. // 5. Application protocols (e.g. ALPN for TLS protocol). // 6. Source type (e.g. any, local or external network). // 7. Source IP address. // 8. Source port. // // For criteria that allow ranges or wildcards, the most specific value in any // of the configured filter chains that matches the incoming connection is going // to be used (e.g. for SNI ``www.example.com`` the most specific match would be // ``www.example.com``, then ``*.example.com``, then ``*.com``, then any filter // chain without ``server_names`` requirements). // // [#comment: Implemented rules are kept in the preference order, with deprecated fields // listed at the end, because that's how we want to list them in the docs. // // [#comment:TODO(PiotrSikora): Add support for configurable precedence of the rules] // [#next-free-field: 13] message FilterChainMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.FilterChainMatch"; enum ConnectionSourceType { // Any connection source matches. ANY = 0; // Match a connection originating from the same host. SAME_IP_OR_LOOPBACK = 1; // Match a connection originating from a different host. 
EXTERNAL = 2; } reserved 1; // Optional destination port to consider when use_original_dst is set on the // listener in determining a filter chain match. google.protobuf.UInt32Value destination_port = 8 [(validate.rules).uint32 = {lte: 65535 gte: 1}]; // If non-empty, an IP address and prefix length to match addresses when the // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. repeated core.v4alpha.CidrRange prefix_ranges = 3; // If non-empty, an IP address and suffix length to match addresses when the // listener is bound to 0.0.0.0/:: or when use_original_dst is specified. // [#not-implemented-hide:] string address_suffix = 4; // [#not-implemented-hide:] google.protobuf.UInt32Value suffix_len = 5; // Specifies the connection source IP match type. Can be any, local or external network. ConnectionSourceType source_type = 12 [(validate.rules).enum = {defined_only: true}]; // The criteria is satisfied if the source IP address of the downstream // connection is contained in at least one of the specified subnets. If the // parameter is not specified or the list is empty, the source IP address is // ignored. repeated core.v4alpha.CidrRange source_prefix_ranges = 6; // The criteria is satisfied if the source port of the downstream connection // is contained in at least one of the specified ports. If the parameter is // not specified, the source port is ignored. repeated uint32 source_ports = 7 [(validate.rules).repeated = {items {uint32 {lte: 65535 gte: 1}}}]; // If non-empty, a list of server names (e.g. SNI for TLS protocol) to consider when determining // a filter chain match. Those values will be compared against the server names of a new // connection, when detected by one of the listener filters. // // The server name will be matched against all wildcard domains, i.e. ``www.example.com`` // will be first matched against ``www.example.com``, then ``*.example.com``, then ``*.com``. 
  //
  // Note that partial wildcards are not supported, and values like ``*w.example.com`` are invalid.
  //
  // .. attention::
  //
  //   See the :ref:`FAQ entry ` on how to configure SNI for more
  //   information.
  repeated string server_names = 11;

  // If non-empty, a transport protocol to consider when determining a filter chain match.
  // This value will be compared against the transport protocol of a new connection, when
  // it's detected by one of the listener filters.
  //
  // Suggested values include:
  //
  // * ``raw_buffer`` - default, used when no transport protocol is detected,
  // * ``tls`` - set by :ref:`envoy.filters.listener.tls_inspector `
  //   when TLS protocol is detected.
  string transport_protocol = 9;

  // If non-empty, a list of application protocols (e.g. ALPN for TLS protocol) to consider when
  // determining a filter chain match. Those values will be compared against the application
  // protocols of a new connection, when detected by one of the listener filters.
  //
  // Suggested values include:
  //
  // * ``http/1.1`` - set by :ref:`envoy.filters.listener.tls_inspector
  //   `,
  // * ``h2`` - set by :ref:`envoy.filters.listener.tls_inspector `
  //
  // .. attention::
  //
  //   Currently, only :ref:`TLS Inspector ` provides
  //   application protocol detection based on the requested
  //   `ALPN `_ values.
  //
  //   However, the use of ALPN is pretty much limited to the HTTP/2 traffic on the Internet,
  //   and matching on values other than ``h2`` is going to lead to a lot of false negatives,
  //   unless all connecting clients are known to use ALPN.
  repeated string application_protocols = 10;
}

// A filter chain wraps a set of match criteria, an optional TLS context, a set of filters, and
// various other parameters.
// [#next-free-field: 9]
message FilterChain {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.listener.v3.FilterChain";

  // The configuration for on-demand filter chain. If this field is not empty in FilterChain message,
  // a filter chain will be built on-demand.
  // On-demand filter chains help speed up the warming of listeners since the building and initialization of
  // an on-demand filter chain will be postponed to the arrival of new connection requests that require this filter chain.
  // Filter chains that are not often used can be set as on-demand.
  message OnDemandConfiguration {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.listener.v3.FilterChain.OnDemandConfiguration";

    // The timeout to wait for filter chain placeholders to complete rebuilding.
    // 1. If this field is set to 0, timeout is disabled.
    // 2. If not specified, a default timeout of 15s is used.
    // Rebuilding will wait until dependencies are ready, have failed, or this timeout is reached.
    // Upon failure or timeout, all connections related to this filter chain will be closed.
    // Rebuilding will start again on the next new connection.
    google.protobuf.Duration rebuild_timeout = 1;
  }

  reserved 2;

  reserved "tls_context";

  // The criteria to use when matching a connection to this filter chain.
  FilterChainMatch filter_chain_match = 1;

  // A list of individual network filters that make up the filter chain for
  // connections established with the listener. Order matters as the filters are
  // processed sequentially as connection events happen. Note: If the filter
  // list is empty, the connection will close by default.
  repeated Filter filters = 3;

  // Whether the listener should expect a PROXY protocol V1 header on new
  // connections. If this option is enabled, the listener will assume that the
  // remote address of the connection is the one specified in the header. Some
  // load balancers including the AWS ELB support this option. If the option is
  // absent or set to false, Envoy will use the physical peer address of the
  // connection as the remote address.
  google.protobuf.BoolValue use_proxy_proto = 4;

  // [#not-implemented-hide:] filter chain metadata.
  core.v4alpha.Metadata metadata = 5;

  // Optional custom transport socket implementation to use for downstream connections.
  // To set up TLS, set a transport socket with name `tls` and
  // :ref:`DownstreamTlsContext ` in the `typed_config`.
  // If no transport socket configuration is specified, new connections
  // will be set up with plaintext.
  core.v4alpha.TransportSocket transport_socket = 6;

  // [#not-implemented-hide:] The unique name (or empty) by which this filter chain is known. If no
  // name is provided, Envoy will allocate an internal UUID for the filter chain. If the filter
  // chain is to be dynamically updated or removed via FCDS a unique name must be provided.
  string name = 7;

  // [#not-implemented-hide:] The configuration to specify whether the filter chain will be built on-demand.
  // If this field is not empty, the filter chain will be built on-demand.
  // Otherwise, the filter chain will be built normally and block listener warming.
  OnDemandConfiguration on_demand_configuration = 8;
}

// Listener filter chain match configuration. This is a recursive structure which allows complex
// nested match configurations to be built using various logical operators.
//
// Examples:
//
// * Matches if the destination port is 3306.
//
// .. code-block:: yaml
//
//  destination_port_range:
//   start: 3306
//   end: 3307
//
// * Matches if the destination port is 3306 or 15000.
//
// .. code-block:: yaml
//
//  or_match:
//   rules:
//    - destination_port_range:
//       start: 3306
//       end: 3307
//    - destination_port_range:
//       start: 15000
//       end: 15001
//
// [#next-free-field: 6]
message ListenerFilterChainMatchPredicate {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.listener.v3.ListenerFilterChainMatchPredicate";

  // A set of match configurations used for logical operations.
message MatchSet { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.ListenerFilterChainMatchPredicate.MatchSet"; // The list of rules that make up the set. repeated ListenerFilterChainMatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; } oneof rule { option (validate.required) = true; // A set that describes a logical OR. If any member of the set matches, the match configuration // matches. MatchSet or_match = 1; // A set that describes a logical AND. If all members of the set match, the match configuration // matches. MatchSet and_match = 2; // A negation match. The match configuration will match if the negated match condition matches. ListenerFilterChainMatchPredicate not_match = 3; // The match configuration will always match. bool any_match = 4 [(validate.rules).bool = {const: true}]; // Match destination port. Particularly, the match evaluation must use the recovered local port if // the owning listener filter is after :ref:`an original_dst listener filter `. type.v3.Int32Range destination_port_range = 5; } } message ListenerFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.ListenerFilter"; reserved 2; reserved "config"; // The name of the filter to instantiate. The name must match a // :ref:`supported filter `. string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. // See the supported filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 3; } // Optional match predicate used to disable the filter. The filter is enabled when this field is empty. // See :ref:`ListenerFilterChainMatchPredicate ` // for further examples. 
ListenerFilterChainMatchPredicate filter_disabled = 4; } ================================================ FILE: api/envoy/config/listener/v4alpha/quic_config.proto ================================================ syntax = "proto3"; package envoy.config.listener.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; option java_outer_classname = "QuicConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: QUIC listener Config] // Configuration specific to the QUIC protocol. // Next id: 5 message QuicProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.QuicProtocolOptions"; // Maximum number of streams that the client can negotiate per connection. 100 // if not specified. google.protobuf.UInt32Value max_concurrent_streams = 1; // Maximum number of milliseconds that connection will be alive when there is // no network activity. 300000ms if not specified. google.protobuf.Duration idle_timeout = 2; // Connection timeout in milliseconds before the crypto handshake is finished. // 20000ms if not specified. google.protobuf.Duration crypto_handshake_timeout = 3; // Runtime flag that controls whether the listener is enabled or not. If not specified, defaults // to enabled. 
core.v4alpha.RuntimeFeatureFlag enabled = 4; } ================================================ FILE: api/envoy/config/listener/v4alpha/udp_default_writer_config.proto ================================================ syntax = "proto3"; package envoy.config.listener.v4alpha; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; option java_outer_classname = "UdpDefaultWriterConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Udp Default Writer Config] // [#not-implemented-hide:] // Configuration specific to the Udp Default Writer. message UdpDefaultWriterOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.listener.v3.UdpDefaultWriterOptions"; } ================================================ FILE: api/envoy/config/listener/v4alpha/udp_gso_batch_writer_config.proto ================================================ syntax = "proto3"; package envoy.config.listener.v4alpha; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.config.listener.v4alpha"; option java_outer_classname = "UdpGsoBatchWriterConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Udp Gso Batch Writer Config] // [#not-implemented-hide:] // Configuration specific to the Udp Gso Batch Writer. 
// Configuration options for the UDP GSO (Generic Segmentation Offload) batch
// writer. Currently carries no options; it exists as a typed_config marker to
// select this writer implementation.
message UdpGsoBatchWriterOptions {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.listener.v3.UdpGsoBatchWriterOptions";
}



================================================
FILE: api/envoy/config/listener/v4alpha/udp_listener_config.proto
================================================
syntax = "proto3";

package envoy.config.listener.v4alpha;

import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.config.listener.v4alpha";
option java_outer_classname = "UdpListenerConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status =
    NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: UDP Listener Config]
// Listener :ref:`configuration overview `

message UdpListenerConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.listener.v3.UdpListenerConfig";

  // Field 2 (`config`, the untyped Struct form) is retired; only typed_config remains.
  reserved 2;

  reserved "config";

  // Used to look up the UDP listener factory; matches "raw_udp_listener" or
  // "quic_listener" to create a specific udp listener.
  // If not specified, treated as "raw_udp_listener".
  string udp_listener_name = 1;

  // Used to create a specific listener factory. For some factories, e.g.
  // "raw_udp_listener", config is not needed.
  oneof config_type {
    google.protobuf.Any typed_config = 3;
  }
}

// Configuration selecting the default (raw) UDP listener. Contains no fields;
// its presence as a typed_config is what selects the implementation.
message ActiveRawUdpListenerConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.listener.v3.ActiveRawUdpListenerConfig";
}



================================================
FILE: api/envoy/config/metrics/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2/core:pkg",
        "//envoy/type/matcher:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/config/metrics/v2/metrics_service.proto
================================================
syntax = "proto3";

package envoy.config.metrics.v2;

import "envoy/api/v2/core/grpc_service.proto";

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.metrics.v2";
option java_outer_classname = "MetricsServiceProto";
option java_multiple_files = true;
// v2 API surface is frozen; changes land in v3+.
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Metrics service]

// Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink
// `. This opaque configuration will be used to create
// Metrics Service.
// [#extension: envoy.stat_sinks.metrics_service]
message MetricsServiceConfig {
  // The upstream gRPC cluster that hosts the metrics service. Required.
  api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}];
}



================================================
FILE: api/envoy/config/metrics/v2/stats.proto
================================================
syntax = "proto3";

package envoy.config.metrics.v2;

import "envoy/api/v2/core/address.proto";
import "envoy/type/matcher/string.proto";

import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.metrics.v2";
option java_outer_classname = "StatsProto";
option java_multiple_files = true;
// v2 API surface is frozen; changes land in v3+.
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Stats]
// Statistics :ref:`architecture overview `.
// Configuration for pluggable stats sinks. message StatsSink { // The name of the stats sink to instantiate. The name must match a supported // stats sink. The built-in stats sinks are: // // * :ref:`envoy.stat_sinks.statsd ` // * :ref:`envoy.stat_sinks.dog_statsd ` // * :ref:`envoy.stat_sinks.metrics_service ` // * :ref:`envoy.stat_sinks.hystrix ` // // Sinks optionally support tagged/multiple dimensional metrics. string name = 1; // Stats sink specific configuration which depends on the sink being instantiated. See // :ref:`StatsdSink ` for an example. oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } // Statistics configuration such as tagging. message StatsConfig { // Each stat name is iteratively processed through these tag specifiers. // When a tag is matched, the first capture group is removed from the name so // later :ref:`TagSpecifiers ` cannot match that // same portion of the match. repeated TagSpecifier stats_tags = 1; // Use all default tag regexes specified in Envoy. These can be combined with // custom tags specified in :ref:`stats_tags // `. They will be processed before // the custom tags. // // .. note:: // // If any default tags are specified twice, the config will be considered // invalid. // // See :repo:`well_known_names.h ` for a list of the // default tags in Envoy. // // If not provided, the value is assumed to be true. google.protobuf.BoolValue use_all_default_tags = 2; // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated // as normal. Preventing the instantiation of certain families of stats can improve memory // performance for Envoys running especially large configs. // // .. warning:: // Excluding stats may affect Envoy's behavior in undocumented ways. See // `issue #8771 `_ for more information. // If any unexpected behavior changes are observed, please open a new issue immediately. 
StatsMatcher stats_matcher = 3; } // Configuration for disabling stat instantiation. message StatsMatcher { // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to // instantiate all stats, there is no need to construct a StatsMatcher. // // However, StatsMatcher can be used to limit the creation of families of stats in order to // conserve memory. Stats can either be disabled entirely, or they can be // limited by either an exclusion or an inclusion list of :ref:`StringMatcher // ` protos: // // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to // `false`, all stats will be instantiated. // // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the // list will not instantiate. // // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of // the StringMatchers in the list. // // // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based // matcher rather than a regex-based matcher. // // Example 1. Excluding all stats. // // .. code-block:: json // // { // "statsMatcher": { // "rejectAll": "true" // } // } // // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: // // .. code-block:: json // // { // "statsMatcher": { // "exclusionList": { // "patterns": [ // { // "prefix": "cluster." // } // ] // } // } // } // // Example 3. Including only manager-related stats: // // .. code-block:: json // // { // "statsMatcher": { // "inclusionList": { // "patterns": [ // { // "prefix": "cluster_manager." // }, // { // "prefix": "listener_manager." // } // ] // } // } // } // oneof stats_matcher { option (validate.required) = true; // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all // stats are enabled. 
bool reject_all = 1; // Exclusive match. All stats are enabled except for those matching one of the supplied // StringMatcher protos. type.matcher.ListStringMatcher exclusion_list = 2; // Inclusive match. No stats are enabled except for those matching one of the supplied // StringMatcher protos. type.matcher.ListStringMatcher inclusion_list = 3; } } // Designates a tag name and value pair. The value may be either a fixed value // or a regex providing the value via capture groups. The specified tag will be // unconditionally set if a fixed value, otherwise it will only be set if one // or more capture groups in the regex match. message TagSpecifier { // Attaches an identifier to the tag values to identify the tag being in the // sink. Envoy has a set of default names and regexes to extract dynamic // portions of existing stats, which can be found in :repo:`well_known_names.h // ` in the Envoy repository. If a :ref:`tag_name // ` is provided in the config and // neither :ref:`regex ` or // :ref:`fixed_value ` were specified, // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. // // .. note:: // // It is invalid to specify the same tag name twice in a config. string tag_name = 1; oneof tag_value { // Designates a tag to strip from the tag extracted name and provide as a named // tag value for all statistics. This will only occur if any part of the name // matches the regex provided with one or more capture groups. // // The first capture group identifies the portion of the name to remove. The // second capture group (which will normally be nested inside the first) will // designate the value of the tag for the statistic. If no second capture // group is provided, the first will also be used to set the value of the tag. // All other capture groups will be ignored. // // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and // one tag specifier: // // .. 
code-block:: json // // { // "tag_name": "envoy.cluster_name", // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted // name ``cluster.upstream_rq_timeout`` and the tag value for // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no // ``.`` character because of the second capture group). // // Example 2. a stat name // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two // tag specifiers: // // .. code-block:: json // // [ // { // "tag_name": "envoy.http_user_agent", // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", // "regex": "^http\\.((.*?)\\.)" // } // ] // // The two regexes of the specifiers will be processed in the definition order. // // The first regex will remove ``ios.``, leaving the tag extracted name // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag // ``envoy.http_user_agent`` will be added with tag value ``ios``. // // The second regex will remove ``connection_manager_1.`` from the tag // extracted name produced by the first regex // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag // ``envoy.http_conn_manager_prefix`` will be added with the tag value // ``connection_manager_1``. string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; // Specifies a fixed tag value for the ``tag_name``. string fixed_value = 3; } } // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] message StatsdSink { oneof statsd_specifier { option (validate.required) = true; // The UDP address of a running `statsd `_ // compliant listener. If specified, statistics will be flushed to this // address. 
api.v2.core.Address address = 1; // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. string tcp_cluster_name = 2; } // Optional custom prefix for StatsdSink. If // specified, this will override the default prefix. // For example: // // .. code-block:: json // // { // "prefix" : "envoy-prod" // } // // will change emitted stats to // // .. code-block:: cpp // // envoy-prod.test_counter:1|c // envoy-prod.test_timer:5|ms // // Note that the default prefix, "envoy", will be used if a prefix is not // specified. // // Stats with default prefix: // // .. code-block:: cpp // // envoy.test_counter:1|c // envoy.test_timer:5|ms string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. // The sink emits stats with `DogStatsD `_ // compatible tags. Tags are configurable via :ref:`StatsConfig // `. // [#extension: envoy.stat_sinks.dog_statsd] message DogStatsdSink { reserved 2; oneof dog_statsd_specifier { option (validate.required) = true; // The UDP address of a running DogStatsD compliant listener. If specified, // statistics will be flushed to this address. api.v2.core.Address address = 1; } // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. // The sink emits stats in `text/event-stream // `_ // formatted stream for use by `Hystrix dashboard // `_. // // Note that only a single HystrixSink should be configured. // // Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. // [#extension: envoy.stat_sinks.hystrix] message HystrixSink { // The number of buckets the rolling statistical window is divided into. 
// // Each time the sink is flushed, all relevant Envoy statistics are sampled and // added to the rolling window (removing the oldest samples in the window // in the process). The sink then outputs the aggregate statistics across the // current rolling window to the event stream(s). // // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets // // More detailed explanation can be found in `Hystrix wiki // `_. int64 num_buckets = 1; } ================================================ FILE: api/envoy/config/metrics/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/metrics/v2:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/metrics/v3/metrics_service.proto ================================================ syntax = "proto3"; package envoy.config.metrics.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v3"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metrics service] // Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to create // Metrics Service. 
// [#extension: envoy.stat_sinks.metrics_service] message MetricsServiceConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.MetricsServiceConfig"; // The upstream gRPC cluster that hosts the metrics service. core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; // API version for metric service transport protocol. This describes the metric service gRPC // endpoint and version of messages used on the wire. core.v3.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; // If true, counters are reported as the delta between flushing intervals. Otherwise, the current // counter value is reported. Defaults to false. // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the // sink will take updates from the :ref:`MetricsResponse `. google.protobuf.BoolValue report_counters_as_deltas = 2; } ================================================ FILE: api/envoy/config/metrics/v3/stats.proto ================================================ syntax = "proto3"; package envoy.config.metrics.v3; import "envoy/config/core/v3/address.proto"; import "envoy/type/matcher/v3/string.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v3"; option java_outer_classname = "StatsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Stats] // Statistics :ref:`architecture overview `. // Configuration for pluggable stats sinks. message StatsSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsSink"; reserved 2; reserved "config"; // The name of the stats sink to instantiate. 
The name must match a supported // stats sink. The built-in stats sinks are: // // * :ref:`envoy.stat_sinks.statsd ` // * :ref:`envoy.stat_sinks.dog_statsd ` // * :ref:`envoy.stat_sinks.metrics_service ` // * :ref:`envoy.stat_sinks.hystrix ` // // Sinks optionally support tagged/multiple dimensional metrics. string name = 1; // Stats sink specific configuration which depends on the sink being instantiated. See // :ref:`StatsdSink ` for an example. oneof config_type { google.protobuf.Any typed_config = 3; } } // Statistics configuration such as tagging. message StatsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsConfig"; // Each stat name is iteratively processed through these tag specifiers. // When a tag is matched, the first capture group is removed from the name so // later :ref:`TagSpecifiers ` cannot match that // same portion of the match. repeated TagSpecifier stats_tags = 1; // Use all default tag regexes specified in Envoy. These can be combined with // custom tags specified in :ref:`stats_tags // `. They will be processed before // the custom tags. // // .. note:: // // If any default tags are specified twice, the config will be considered // invalid. // // See :repo:`well_known_names.h ` for a list of the // default tags in Envoy. // // If not provided, the value is assumed to be true. google.protobuf.BoolValue use_all_default_tags = 2; // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated // as normal. Preventing the instantiation of certain families of stats can improve memory // performance for Envoys running especially large configs. // // .. warning:: // Excluding stats may affect Envoy's behavior in undocumented ways. See // `issue #8771 `_ for more information. // If any unexpected behavior changes are observed, please open a new issue immediately. StatsMatcher stats_matcher = 3; // Defines rules for setting the histogram buckets. 
Rules are evaluated in order, and the first // match is applied. If no match is found (or if no rules are set), the following default buckets // are used: // // .. code-block:: json // // [ // 0.5, // 1, // 5, // 10, // 25, // 50, // 100, // 250, // 500, // 1000, // 2500, // 5000, // 10000, // 30000, // 60000, // 300000, // 600000, // 1800000, // 3600000 // ] repeated HistogramBucketSettings histogram_bucket_settings = 4; } // Configuration for disabling stat instantiation. message StatsMatcher { // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to // instantiate all stats, there is no need to construct a StatsMatcher. // // However, StatsMatcher can be used to limit the creation of families of stats in order to // conserve memory. Stats can either be disabled entirely, or they can be // limited by either an exclusion or an inclusion list of :ref:`StringMatcher // ` protos: // // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to // `false`, all stats will be instantiated. // // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the // list will not instantiate. // // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of // the StringMatchers in the list. // // // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based // matcher rather than a regex-based matcher. // // Example 1. Excluding all stats. // // .. code-block:: json // // { // "statsMatcher": { // "rejectAll": "true" // } // } // // Example 2. Excluding all cluster-specific stats, but not cluster-manager stats: // // .. code-block:: json // // { // "statsMatcher": { // "exclusionList": { // "patterns": [ // { // "prefix": "cluster." // } // ] // } // } // } // // Example 3. 
Including only manager-related stats: // // .. code-block:: json // // { // "statsMatcher": { // "inclusionList": { // "patterns": [ // { // "prefix": "cluster_manager." // }, // { // "prefix": "listener_manager." // } // ] // } // } // } // option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsMatcher"; oneof stats_matcher { option (validate.required) = true; // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all // stats are enabled. bool reject_all = 1; // Exclusive match. All stats are enabled except for those matching one of the supplied // StringMatcher protos. type.matcher.v3.ListStringMatcher exclusion_list = 2; // Inclusive match. No stats are enabled except for those matching one of the supplied // StringMatcher protos. type.matcher.v3.ListStringMatcher inclusion_list = 3; } } // Designates a tag name and value pair. The value may be either a fixed value // or a regex providing the value via capture groups. The specified tag will be // unconditionally set if a fixed value, otherwise it will only be set if one // or more capture groups in the regex match. message TagSpecifier { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.TagSpecifier"; // Attaches an identifier to the tag values to identify the tag being in the // sink. Envoy has a set of default names and regexes to extract dynamic // portions of existing stats, which can be found in :repo:`well_known_names.h // ` in the Envoy repository. If a :ref:`tag_name // ` is provided in the config and // neither :ref:`regex ` or // :ref:`fixed_value ` were specified, // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. // // .. note:: // // It is invalid to specify the same tag name twice in a config. string tag_name = 1; oneof tag_value { // Designates a tag to strip from the tag extracted name and provide as a named // tag value for all statistics. 
This will only occur if any part of the name // matches the regex provided with one or more capture groups. // // The first capture group identifies the portion of the name to remove. The // second capture group (which will normally be nested inside the first) will // designate the value of the tag for the statistic. If no second capture // group is provided, the first will also be used to set the value of the tag. // All other capture groups will be ignored. // // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and // one tag specifier: // // .. code-block:: json // // { // "tag_name": "envoy.cluster_name", // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted // name ``cluster.upstream_rq_timeout`` and the tag value for // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no // ``.`` character because of the second capture group). // // Example 2. a stat name // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two // tag specifiers: // // .. code-block:: json // // [ // { // "tag_name": "envoy.http_user_agent", // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", // "regex": "^http\\.((.*?)\\.)" // } // ] // // The two regexes of the specifiers will be processed in the definition order. // // The first regex will remove ``ios.``, leaving the tag extracted name // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag // ``envoy.http_user_agent`` will be added with tag value ``ios``. // // The second regex will remove ``connection_manager_1.`` from the tag // extracted name produced by the first regex // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag // ``envoy.http_conn_manager_prefix`` will be added with the tag value // ``connection_manager_1``. 
string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; // Specifies a fixed tag value for the ``tag_name``. string fixed_value = 3; } } // Specifies a matcher for stats and the buckets that matching stats should use. message HistogramBucketSettings { // The stats that this rule applies to. The match is applied to the original stat name // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. type.matcher.v3.StringMatcher match = 1 [(validate.rules).message = {required: true}]; // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. // The order of the buckets does not matter. repeated double buckets = 2 [(validate.rules).repeated = { min_items: 1 unique: true items {double {gt: 0.0}} }]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] message StatsdSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.StatsdSink"; oneof statsd_specifier { option (validate.required) = true; // The UDP address of a running `statsd `_ // compliant listener. If specified, statistics will be flushed to this // address. core.v3.Address address = 1; // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. string tcp_cluster_name = 2; } // Optional custom prefix for StatsdSink. If // specified, this will override the default prefix. // For example: // // .. code-block:: json // // { // "prefix" : "envoy-prod" // } // // will change emitted stats to // // .. code-block:: cpp // // envoy-prod.test_counter:1|c // envoy-prod.test_timer:5|ms // // Note that the default prefix, "envoy", will be used if a prefix is not // specified. // // Stats with default prefix: // // .. 
code-block:: cpp // // envoy.test_counter:1|c // envoy.test_timer:5|ms string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. // The sink emits stats with `DogStatsD `_ // compatible tags. Tags are configurable via :ref:`StatsConfig // `. // [#extension: envoy.stat_sinks.dog_statsd] message DogStatsdSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.DogStatsdSink"; reserved 2; oneof dog_statsd_specifier { option (validate.required) = true; // The UDP address of a running DogStatsD compliant listener. If specified, // statistics will be flushed to this address. core.v3.Address address = 1; } // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; // Optional max datagram size to use when sending UDP messages. By default Envoy // will emit one metric per datagram. By specifying a max-size larger than a single // metric, Envoy will emit multiple, new-line separated metrics. The max datagram // size should not exceed your network's MTU. // // Note that this value may not be respected if smaller than a single metric. google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. // The sink emits stats in `text/event-stream // `_ // formatted stream for use by `Hystrix dashboard // `_. // // Note that only a single HystrixSink should be configured. // // Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. // [#extension: envoy.stat_sinks.hystrix] message HystrixSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v2.HystrixSink"; // The number of buckets the rolling statistical window is divided into. 
// // Each time the sink is flushed, all relevant Envoy statistics are sampled and // added to the rolling window (removing the oldest samples in the window // in the process). The sink then outputs the aggregate statistics across the // current rolling window to the event stream(s). // // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets // // More detailed explanation can be found in `Hystrix wiki // `_. int64 num_buckets = 1; } ================================================ FILE: api/envoy/config/metrics/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/config/metrics/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/metrics/v4alpha/metrics_service.proto ================================================ syntax = "proto3"; package envoy.config.metrics.v4alpha; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Metrics service] // Metrics Service is configured as a built-in *envoy.stat_sinks.metrics_service* :ref:`StatsSink // `. This opaque configuration will be used to create // Metrics Service. 
// [#extension: envoy.stat_sinks.metrics_service] message MetricsServiceConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.MetricsServiceConfig"; // The upstream gRPC cluster that hosts the metrics service. core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; // API version for metric service transport protocol. This describes the metric service gRPC // endpoint and version of messages used on the wire. core.v4alpha.ApiVersion transport_api_version = 3 [(validate.rules).enum = {defined_only: true}]; // If true, counters are reported as the delta between flushing intervals. Otherwise, the current // counter value is reported. Defaults to false. // Eventually (https://github.com/envoyproxy/envoy/issues/10968) if this value is not set, the // sink will take updates from the :ref:`MetricsResponse `. google.protobuf.BoolValue report_counters_as_deltas = 2; } ================================================ FILE: api/envoy/config/metrics/v4alpha/stats.proto ================================================ syntax = "proto3"; package envoy.config.metrics.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.metrics.v4alpha"; option java_outer_classname = "StatsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Stats] // Statistics :ref:`architecture overview `. // Configuration for pluggable stats sinks. 
message StatsSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsSink"; reserved 2; reserved "config"; // The name of the stats sink to instantiate. The name must match a supported // stats sink. The built-in stats sinks are: // // * :ref:`envoy.stat_sinks.statsd ` // * :ref:`envoy.stat_sinks.dog_statsd ` // * :ref:`envoy.stat_sinks.metrics_service ` // * :ref:`envoy.stat_sinks.hystrix ` // // Sinks optionally support tagged/multiple dimensional metrics. string name = 1; // Stats sink specific configuration which depends on the sink being instantiated. See // :ref:`StatsdSink ` for an example. oneof config_type { google.protobuf.Any typed_config = 3; } } // Statistics configuration such as tagging. message StatsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsConfig"; // Each stat name is iteratively processed through these tag specifiers. // When a tag is matched, the first capture group is removed from the name so // later :ref:`TagSpecifiers ` cannot match that // same portion of the match. repeated TagSpecifier stats_tags = 1; // Use all default tag regexes specified in Envoy. These can be combined with // custom tags specified in :ref:`stats_tags // `. They will be processed before // the custom tags. // // .. note:: // // If any default tags are specified twice, the config will be considered // invalid. // // See :repo:`well_known_names.h ` for a list of the // default tags in Envoy. // // If not provided, the value is assumed to be true. google.protobuf.BoolValue use_all_default_tags = 2; // Inclusion/exclusion matcher for stat name creation. If not provided, all stats are instantiated // as normal. Preventing the instantiation of certain families of stats can improve memory // performance for Envoys running especially large configs. // // .. warning:: // Excluding stats may affect Envoy's behavior in undocumented ways. 
See // `issue #8771 `_ for more information. // If any unexpected behavior changes are observed, please open a new issue immediately. StatsMatcher stats_matcher = 3; // Defines rules for setting the histogram buckets. Rules are evaluated in order, and the first // match is applied. If no match is found (or if no rules are set), the following default buckets // are used: // // .. code-block:: json // // [ // 0.5, // 1, // 5, // 10, // 25, // 50, // 100, // 250, // 500, // 1000, // 2500, // 5000, // 10000, // 30000, // 60000, // 300000, // 600000, // 1800000, // 3600000 // ] repeated HistogramBucketSettings histogram_bucket_settings = 4; } // Configuration for disabling stat instantiation. message StatsMatcher { // The instantiation of stats is unrestricted by default. If the goal is to configure Envoy to // instantiate all stats, there is no need to construct a StatsMatcher. // // However, StatsMatcher can be used to limit the creation of families of stats in order to // conserve memory. Stats can either be disabled entirely, or they can be // limited by either an exclusion or an inclusion list of :ref:`StringMatcher // ` protos: // // * If `reject_all` is set to `true`, no stats will be instantiated. If `reject_all` is set to // `false`, all stats will be instantiated. // // * If an exclusion list is supplied, any stat name matching *any* of the StringMatchers in the // list will not instantiate. // // * If an inclusion list is supplied, no stats will instantiate, except those matching *any* of // the StringMatchers in the list. // // // A StringMatcher can be used to match against an exact string, a suffix / prefix, or a regex. // **NB:** For performance reasons, it is highly recommended to use a prefix- or suffix-based // matcher rather than a regex-based matcher. // // Example 1. Excluding all stats. // // .. code-block:: json // // { // "statsMatcher": { // "rejectAll": "true" // } // } // // Example 2. 
Excluding all cluster-specific stats, but not cluster-manager stats: // // .. code-block:: json // // { // "statsMatcher": { // "exclusionList": { // "patterns": [ // { // "prefix": "cluster." // } // ] // } // } // } // // Example 3. Including only manager-related stats: // // .. code-block:: json // // { // "statsMatcher": { // "inclusionList": { // "patterns": [ // { // "prefix": "cluster_manager." // }, // { // "prefix": "listener_manager." // } // ] // } // } // } // option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsMatcher"; oneof stats_matcher { option (validate.required) = true; // If `reject_all` is true, then all stats are disabled. If `reject_all` is false, then all // stats are enabled. bool reject_all = 1; // Exclusive match. All stats are enabled except for those matching one of the supplied // StringMatcher protos. type.matcher.v4alpha.ListStringMatcher exclusion_list = 2; // Inclusive match. No stats are enabled except for those matching one of the supplied // StringMatcher protos. type.matcher.v4alpha.ListStringMatcher inclusion_list = 3; } } // Designates a tag name and value pair. The value may be either a fixed value // or a regex providing the value via capture groups. The specified tag will be // unconditionally set if a fixed value, otherwise it will only be set if one // or more capture groups in the regex match. message TagSpecifier { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.TagSpecifier"; // Attaches an identifier to the tag values to identify the tag being in the // sink. Envoy has a set of default names and regexes to extract dynamic // portions of existing stats, which can be found in :repo:`well_known_names.h // ` in the Envoy repository. If a :ref:`tag_name // ` is provided in the config and // neither :ref:`regex ` nor // :ref:`fixed_value ` was specified, // Envoy will attempt to find that name in its set of defaults and use the accompanying regex. 
// // .. note:: // // It is invalid to specify the same tag name twice in a config. string tag_name = 1; oneof tag_value { // Designates a tag to strip from the tag extracted name and provide as a named // tag value for all statistics. This will only occur if any part of the name // matches the regex provided with one or more capture groups. // // The first capture group identifies the portion of the name to remove. The // second capture group (which will normally be nested inside the first) will // designate the value of the tag for the statistic. If no second capture // group is provided, the first will also be used to set the value of the tag. // All other capture groups will be ignored. // // Example 1. a stat name ``cluster.foo_cluster.upstream_rq_timeout`` and // one tag specifier: // // .. code-block:: json // // { // "tag_name": "envoy.cluster_name", // "regex": "^cluster\\.((.+?)\\.)" // } // // Note that the regex will remove ``foo_cluster.`` making the tag extracted // name ``cluster.upstream_rq_timeout`` and the tag value for // ``envoy.cluster_name`` will be ``foo_cluster`` (note: there will be no // ``.`` character because of the second capture group). // // Example 2. a stat name // ``http.connection_manager_1.user_agent.ios.downstream_cx_total`` and two // tag specifiers: // // .. code-block:: json // // [ // { // "tag_name": "envoy.http_user_agent", // "regex": "^http(?=\\.).*?\\.user_agent\\.((.+?)\\.)\\w+?$" // }, // { // "tag_name": "envoy.http_conn_manager_prefix", // "regex": "^http\\.((.*?)\\.)" // } // ] // // The two regexes of the specifiers will be processed in the definition order. // // The first regex will remove ``ios.``, leaving the tag extracted name // ``http.connection_manager_1.user_agent.downstream_cx_total``. The tag // ``envoy.http_user_agent`` will be added with tag value ``ios``. 
// // The second regex will remove ``connection_manager_1.`` from the tag // extracted name produced by the first regex // ``http.connection_manager_1.user_agent.downstream_cx_total``, leaving // ``http.user_agent.downstream_cx_total`` as the tag extracted name. The tag // ``envoy.http_conn_manager_prefix`` will be added with the tag value // ``connection_manager_1``. string regex = 2 [(validate.rules).string = {max_bytes: 1024}]; // Specifies a fixed tag value for the ``tag_name``. string fixed_value = 3; } } // Specifies a matcher for stats and the buckets that matching stats should use. message HistogramBucketSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.HistogramBucketSettings"; // The stats that this rule applies to. The match is applied to the original stat name // before tag-extraction, for example `cluster.exampleclustername.upstream_cx_length_ms`. type.matcher.v4alpha.StringMatcher match = 1 [(validate.rules).message = {required: true}]; // Each value is the upper bound of a bucket. Each bucket must be greater than 0 and unique. // The order of the buckets does not matter. repeated double buckets = 2 [(validate.rules).repeated = { min_items: 1 unique: true items {double {gt: 0.0}} }]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.statsd* sink. This sink does not support // tagged metrics. // [#extension: envoy.stat_sinks.statsd] message StatsdSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.StatsdSink"; oneof statsd_specifier { option (validate.required) = true; // The UDP address of a running `statsd `_ // compliant listener. If specified, statistics will be flushed to this // address. core.v4alpha.Address address = 1; // The name of a cluster that is running a TCP `statsd // `_ compliant listener. If specified, // Envoy will connect to this cluster to flush statistics. 
string tcp_cluster_name = 2; } // Optional custom prefix for StatsdSink. If // specified, this will override the default prefix. // For example: // // .. code-block:: json // // { // "prefix" : "envoy-prod" // } // // will change emitted stats to // // .. code-block:: cpp // // envoy-prod.test_counter:1|c // envoy-prod.test_timer:5|ms // // Note that the default prefix, "envoy", will be used if a prefix is not // specified. // // Stats with default prefix: // // .. code-block:: cpp // // envoy.test_counter:1|c // envoy.test_timer:5|ms string prefix = 3; } // Stats configuration proto schema for built-in *envoy.stat_sinks.dog_statsd* sink. // The sink emits stats with `DogStatsD `_ // compatible tags. Tags are configurable via :ref:`StatsConfig // `. // [#extension: envoy.stat_sinks.dog_statsd] message DogStatsdSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.DogStatsdSink"; reserved 2; oneof dog_statsd_specifier { option (validate.required) = true; // The UDP address of a running DogStatsD compliant listener. If specified, // statistics will be flushed to this address. core.v4alpha.Address address = 1; } // Optional custom metric name prefix. See :ref:`StatsdSink's prefix field // ` for more details. string prefix = 3; // Optional max datagram size to use when sending UDP messages. By default Envoy // will emit one metric per datagram. By specifying a max-size larger than a single // metric, Envoy will emit multiple, new-line separated metrics. The max datagram // size should not exceed your network's MTU. // // Note that this value may not be respected if smaller than a single metric. google.protobuf.UInt64Value max_bytes_per_datagram = 4 [(validate.rules).uint64 = {gt: 0}]; } // Stats configuration proto schema for built-in *envoy.stat_sinks.hystrix* sink. // The sink emits stats in `text/event-stream // `_ // formatted stream for use by `Hystrix dashboard // `_. 
// // Note that only a single HystrixSink should be configured. // // Streaming is started through an admin endpoint :http:get:`/hystrix_event_stream`. // [#extension: envoy.stat_sinks.hystrix] message HystrixSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.metrics.v3.HystrixSink"; // The number of buckets the rolling statistical window is divided into. // // Each time the sink is flushed, all relevant Envoy statistics are sampled and // added to the rolling window (removing the oldest samples in the window // in the process). The sink then outputs the aggregate statistics across the // current rolling window to the event stream(s). // // rolling_window(ms) = stats_flush_interval(ms) * num_of_buckets // // More detailed explanation can be found in `Hystrix wiki // `_. int64 num_buckets = 1; } ================================================ FILE: api/envoy/config/overload/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/overload/v2alpha/overload.proto ================================================ syntax = "proto3"; package envoy.config.overload.v2alpha; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.overload.v2alpha"; option java_outer_classname = "OverloadProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Overload Manager] // The Overload Manager provides an extensible framework to protect Envoy instances // from overload of various resources (memory, cpu, file descriptors, etc). // It monitors a configurable set of resources and notifies registered listeners // when triggers related to those resources fire. message ResourceMonitor { // The name of the resource monitor to instantiate. Must match a registered // resource monitor type. The built-in resource monitors are: // // * :ref:`envoy.resource_monitors.fixed_heap // ` // * :ref:`envoy.resource_monitors.injected_resource // ` string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Configuration for the resource monitor being instantiated. oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } message ThresholdTrigger { // If the resource pressure is greater than or equal to this value, the trigger // will fire. double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; } message Trigger { // The name of the resource this is a trigger for. 
string name = 1 [(validate.rules).string = {min_bytes: 1}]; oneof trigger_oneof { option (validate.required) = true; ThresholdTrigger threshold = 2; } } message OverloadAction { // The name of the overload action. This is just a well-known string that listeners can // use for registering callbacks. Custom overload actions should be named using reverse // DNS to ensure uniqueness. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // A set of triggers for this action. If any of these triggers fire the overload action // is activated. Listeners are notified when the overload action transitions from // inactivated to activated, or vice versa. repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; } message OverloadManager { // The interval for refreshing resource usage. google.protobuf.Duration refresh_interval = 1; // The set of resources to monitor. repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; // The set of overload actions. repeated OverloadAction actions = 3; } ================================================ FILE: api/envoy/config/overload/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/overload/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/overload/v3/overload.proto ================================================ syntax = "proto3"; package envoy.config.overload.v3; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.overload.v3"; option java_outer_classname = "OverloadProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Overload Manager] // The Overload Manager provides an extensible framework to protect Envoy instances // from overload of various resources (memory, cpu, file descriptors, etc). // It monitors a configurable set of resources and notifies registered listeners // when triggers related to those resources fire. message ResourceMonitor { option (udpa.annotations.versioning).previous_message_type = "envoy.config.overload.v2alpha.ResourceMonitor"; reserved 2; reserved "config"; // The name of the resource monitor to instantiate. Must match a registered // resource monitor type. The built-in resource monitors are: // // * :ref:`envoy.resource_monitors.fixed_heap // ` // * :ref:`envoy.resource_monitors.injected_resource // ` string name = 1 [(validate.rules).string = {min_len: 1}]; // Configuration for the resource monitor being instantiated. 
oneof config_type { google.protobuf.Any typed_config = 3; } } message ThresholdTrigger { option (udpa.annotations.versioning).previous_message_type = "envoy.config.overload.v2alpha.ThresholdTrigger"; // If the resource pressure is greater than or equal to this value, the trigger // will enter saturation. double value = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; } message ScaledTrigger { // If the resource pressure is greater than this value, the trigger will be in the // :ref:`scaling ` state with value // `(pressure - scaling_threshold) / (saturation_threshold - scaling_threshold)`. double scaling_threshold = 1 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; // If the resource pressure is greater than this value, the trigger will enter saturation. double saturation_threshold = 2 [(validate.rules).double = {lte: 1.0 gte: 0.0}]; } message Trigger { option (udpa.annotations.versioning).previous_message_type = "envoy.config.overload.v2alpha.Trigger"; // The name of the resource this is a trigger for. string name = 1 [(validate.rules).string = {min_len: 1}]; oneof trigger_oneof { option (validate.required) = true; ThresholdTrigger threshold = 2; ScaledTrigger scaled = 3; } } message OverloadAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.overload.v2alpha.OverloadAction"; // The name of the overload action. This is just a well-known string that listeners can // use for registering callbacks. Custom overload actions should be named using reverse // DNS to ensure uniqueness. string name = 1 [(validate.rules).string = {min_len: 1}]; // A set of triggers for this action. The state of the action is the maximum // state of all triggers, which can be scaling between 0 and 1 or saturated. Listeners // are notified when the overload action changes state. 
repeated Trigger triggers = 2 [(validate.rules).repeated = {min_items: 1}]; } message OverloadManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.overload.v2alpha.OverloadManager"; // The interval for refreshing resource usage. google.protobuf.Duration refresh_interval = 1; // The set of resources to monitor. repeated ResourceMonitor resource_monitors = 2 [(validate.rules).repeated = {min_items: 1}]; // The set of overload actions. repeated OverloadAction actions = 3; } ================================================ FILE: api/envoy/config/ratelimit/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/ratelimit/v2/rls.proto ================================================ syntax = "proto3"; package envoy.config.ratelimit.v2; import "envoy/api/v2/core/grpc_service.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.ratelimit.v2"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate limit service] // Rate limit :ref:`configuration overview `. message RateLimitServiceConfig { reserved 1, 3; // Specifies the gRPC service that hosts the rate limit service. The client // will connect to this cluster when it needs to make rate limit service // requests. 
api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/ratelimit/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/ratelimit/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/ratelimit/v3/rls.proto ================================================ syntax = "proto3"; package envoy.config.ratelimit.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.ratelimit.v3"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit service] // Rate limit :ref:`configuration overview `. message RateLimitServiceConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.ratelimit.v2.RateLimitServiceConfig"; reserved 1, 3; // Specifies the gRPC service that hosts the rate limit service. The client // will connect to this cluster when it needs to make rate limit service // requests. core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; // API version for rate limit transport protocol. This describes the rate limit gRPC endpoint and // version of messages used on the wire. 
core.v3.ApiVersion transport_api_version = 4 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/config/rbac/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/api/v2/route:pkg", "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) ================================================ FILE: api/envoy/config/rbac/v2/rbac.proto ================================================ syntax = "proto3"; package envoy.config.rbac.v2; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/route/route_components.proto"; import "envoy/type/matcher/metadata.proto"; import "envoy/type/matcher/path.proto"; import "envoy/type/matcher/string.proto"; import "google/api/expr/v1alpha1/syntax.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.rbac.v2"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a // service. RBAC policies are additive. The policies are examined in order. A request is allowed // once a matching policy is found (suppose the `action` is ALLOW). // // Here is an example of RBAC configuration. It has two policies: // // * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so // does "cluster.local/ns/default/sa/superuser". 
// // * Any user can read ("GET") the service at paths with prefix "/products", so long as the // destination port is either 80 or 443. // // .. code-block:: yaml // // action: ALLOW // policies: // "service-admin": // permissions: // - any: true // principals: // - authenticated: // principal_name: // exact: "cluster.local/ns/default/sa/admin" // - authenticated: // principal_name: // exact: "cluster.local/ns/default/sa/superuser" // "product-viewer": // permissions: // - and_rules: // rules: // - header: { name: ":method", exact_match: "GET" } // - url_path: // path: { prefix: "/products" } // - or_rules: // rules: // - destination_port: 80 // - destination_port: 443 // principals: // - any: true // message RBAC { // Should we do safe-list or block-list style access control? enum Action { // The policies grant access to principals. The rest is denied. This is safe-list style // access control. This is the default type. ALLOW = 0; // The policies deny access to principals. The rest is allowed. This is block-list style // access control. DENY = 1; } // The action to take if a policy matches. The request is allowed if and only if: // // * `action` is "ALLOWED" and at least one policy matches // * `action` is "DENY" and none of the policies match Action action = 1; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } // Policy specifies a role and the principals that are assigned/denied the role. A policy matches if // and only if at least one of its permissions match the action taking place AND at least one of its // principals match the downstream AND the condition is true if specified. message Policy { // Required. The set of permissions that define a role. Each permission is matched with OR // semantics. To match all actions for this policy, a single Permission with the `any` field set // to true should be used. 
repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; // Required. The set of principals that are assigned/denied the role based on “action”. Each // principal is matched with OR semantics. To match all downstreams for this policy, a single // Principal with the `any` field set to true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined // with the permissions and the principals as a clause with AND semantics. google.api.expr.v1alpha1.Expr condition = 3; } // Permission defines an action (or actions) that a principal can take. // [#next-free-field: 11] message Permission { // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, // each are applied with the associated behavior. message Set { repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; } oneof rule { option (validate.required) = true; // A set of rules that all must match in order to define the action. Set and_rules = 1; // A set of rules where at least one must match in order to define the action. Set or_rules = 2; // When any is set, it matches any action. bool any = 3 [(validate.rules).bool = {const: true}]; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` // field if you want to match the URL path without the query and fragment string. api.v2.route.HeaderMatcher header = 4; // A URL path on the incoming HTTP request. Only available for HTTP. type.matcher.PathMatcher url_path = 10; // A CIDR block that describes the destination IP. api.v2.core.CidrRange destination_ip = 5; // A port number that describes the destination port connecting to. 
uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. type.matcher.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of `not_rule` would // match, this permission would not match. Conversely, if the value of `not_rule` would not // match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is // typically TLS SNI. // // .. attention:: // // The behavior of this field may be affected by how Envoy is configured // as explained below. // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for // the :ref:`server name `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // // * A :ref:`listener filter ` may // overwrite a connection's requested server name within Envoy. // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. type.matcher.StringMatcher requested_server_name = 9; } } // Principal defines an identity or a group of identities for a downstream subject. // [#next-free-field: 12] message Principal { // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. Depending on the context, // each are applied with the associated behavior. message Set { repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; } // Authentication attributes for a downstream. message Authenticated { reserved 1; // The name of the principal. If set, The URI SAN or DNS SAN in that order is used from the // certificate, otherwise the subject field is used. If unset, it applies to any user that is // authenticated. type.matcher.StringMatcher principal_name = 2; } oneof identifier { option (validate.required) = true; // A set of identifiers that all must match in order to define the downstream. 
Set and_ids = 1; // A set of identifiers at least one must match in order to define the downstream. Set or_ids = 2; // When any is set, it matches any downstream. bool any = 3 [(validate.rules).bool = {const: true}]; // Authenticated attributes that identify the downstream. Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. // This address will honor proxy protocol, but will not honor XFF. api.v2.core.CidrRange source_ip = 5 [deprecated = true]; // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the // :ref:`remote_ip ` is inferred // from for example the x-forwarder-for header, proxy protocol, etc. api.v2.core.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the // :ref:`direct_remote_ip `. // E.g, if the remote ip is inferred from for example the x-forwarder-for header, // proxy protocol, etc. api.v2.core.CidrRange remote_ip = 11; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` // field if you want to match the URL path without the query and fragment string. api.v2.route.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. type.matcher.PathMatcher url_path = 9; // Metadata that describes additional information about the principal. type.matcher.MetadataMatcher metadata = 7; // Negates matching the provided principal. For instance, if the value of `not_id` would match, // this principal would not match. Conversely, if the value of `not_id` would not match, this // principal would match. 
Principal not_id = 8; } } ================================================ FILE: api/envoy/config/rbac/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/rbac/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) ================================================ FILE: api/envoy/config/rbac/v3/rbac.proto ================================================ syntax = "proto3"; package envoy.config.rbac.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/path.proto"; import "envoy/type/matcher/v3/string.proto"; import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.rbac.v3"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access control for a // service. RBAC policies are additive. The policies are examined in order. Requests are allowed // or denied based on the `action` and whether a matching policy is found. For instance, if the // action is ALLOW and a matching policy is found the request should be allowed. 
// // RBAC can also be used to make access logging decisions by communicating with access loggers // through dynamic metadata. When the action is LOG and at least one policy matches, the // `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating // the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // // * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so // does "cluster.local/ns/default/sa/superuser". // // * Any user can read ("GET") the service at paths with prefix "/products", so long as the // destination port is either 80 or 443. // // .. code-block:: yaml // // action: ALLOW // policies: // "service-admin": // permissions: // - any: true // principals: // - authenticated: // principal_name: // exact: "cluster.local/ns/default/sa/admin" // - authenticated: // principal_name: // exact: "cluster.local/ns/default/sa/superuser" // "product-viewer": // permissions: // - and_rules: // rules: // - header: { name: ":method", exact_match: "GET" } // - url_path: // path: { prefix: "/products" } // - or_rules: // rules: // - destination_port: 80 // - destination_port: 443 // principals: // - any: true // message RBAC { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.RBAC"; // Should we do safe-list or block-list style access control? enum Action { // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; // The policies deny access to principals. The rest are allowed. This is block-list style // access control. DENY = 1; // The policies set the `access_log_hint` dynamic metadata key based on if requests match. // All requests are allowed. LOG = 2; } // The action to take if a policy matches. Every action either allows or denies a request, // and can also carry out action-specific operations. 
// // Actions: // // * ALLOW: Allows the request if and only if there is a policy that matches // the request. // * DENY: Allows the request if and only if there are no policies that // match the request. // * LOG: Allows all requests. If at least one policy matches, the dynamic // metadata key `access_log_hint` is set to the value `true` under the shared // key namespace 'envoy.common'. If no policies match, it is set to `false`. // Other actions do not modify this key. // Action action = 1 [(validate.rules).enum = {defined_only: true}]; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } // Policy specifies a role and the principals that are assigned/denied the role. // A policy matches if and only if at least one of its permissions match the // action taking place AND at least one of its principals match the downstream // AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Policy"; // Required. The set of permissions that define a role. Each permission is // matched with OR semantics. To match all actions for this policy, a single // Permission with the `any` field set to true should be used. repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; // Required. The set of principals that are assigned/denied the role based on // “action”. Each principal is matched with OR semantics. To match all // downstreams for this policy, a single Principal with the `any` field set to // true should be used. repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined // with the permissions and the principals as a clause with AND semantics. // Only be used when checked_condition is not used. 
google.api.expr.v1alpha1.Expr condition = 3 [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; // [#not-implemented-hide:] // An optional symbolic expression that has been successfully type checked. // Only be used when condition is not used. google.api.expr.v1alpha1.CheckedExpr checked_condition = 4 [(udpa.annotations.field_migrate).oneof_promotion = "expression_specifier"]; } // Permission defines an action (or actions) that a principal can take. // [#next-free-field: 11] message Permission { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission"; // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, // each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Permission.Set"; repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; } oneof rule { option (validate.required) = true; // A set of rules that all must match in order to define the action. Set and_rules = 1; // A set of rules where at least one must match in order to define the action. Set or_rules = 2; // When any is set, it matches any action. bool any = 3 [(validate.rules).bool = {const: true}]; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` // field if you want to match the URL path without the query and fragment string. route.v3.HeaderMatcher header = 4; // A URL path on the incoming HTTP request. Only available for HTTP. type.matcher.v3.PathMatcher url_path = 10; // A CIDR block that describes the destination IP. core.v3.CidrRange destination_ip = 5; // A port number that describes the destination port connecting to. 
uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. type.matcher.v3.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of // `not_rule` would match, this permission would not match. Conversely, if // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is // typically TLS SNI. // // .. attention:: // // The behavior of this field may be affected by how Envoy is configured // as explained below. // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for // the :ref:`server name // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // // * A :ref:`listener filter ` may // overwrite a connection's requested server name within Envoy. // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. type.matcher.v3.StringMatcher requested_server_name = 9; } } // Principal defines an identity or a group of identities for a downstream // subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal"; // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal.Set"; repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; } // Authentication attributes for a downstream. message Authenticated { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v2.Principal.Authenticated"; reserved 1; // The name of the principal. 
If set, The URI SAN or DNS SAN in that order // is used from the certificate, otherwise the subject field is used. If // unset, it applies to any user that is authenticated. type.matcher.v3.StringMatcher principal_name = 2; } oneof identifier { option (validate.required) = true; // A set of identifiers that all must match in order to define the // downstream. Set and_ids = 1; // A set of identifiers at least one must match in order to define the // downstream. Set or_ids = 2; // When any is set, it matches any downstream. bool any = 3 [(validate.rules).bool = {const: true}]; // Authenticated attributes that identify the downstream. Authenticated authenticated = 4; // A CIDR block that describes the downstream IP. // This address will honor proxy protocol, but will not honor XFF. core.v3.CidrRange source_ip = 5 [deprecated = true]; // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the // :ref:`remote_ip ` is // inferred from for example the x-forwarder-for header, proxy protocol, // etc. core.v3.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the // :ref:`direct_remote_ip // `. E.g, if the // remote ip is inferred from for example the x-forwarder-for header, proxy // protocol, etc. core.v3.CidrRange remote_ip = 11; // A header (or pseudo-header such as :path or :method) on the incoming HTTP // request. Only available for HTTP request. Note: the pseudo-header :path // includes the query and fragment string. Use the `url_path` field if you // want to match the URL path without the query and fragment string. route.v3.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. type.matcher.v3.PathMatcher url_path = 9; // Metadata that describes additional information about the principal. 
type.matcher.v3.MetadataMatcher metadata = 7; // Negates matching the provided principal. For instance, if the value of // `not_id` would match, this principal would not match. Conversely, if the // value of `not_id` would not match, this principal would match. Principal not_id = 8; } } ================================================ FILE: api/envoy/config/rbac/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/config/rbac/v3:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_google_googleapis//google/api/expr/v1alpha1:checked_proto", "@com_google_googleapis//google/api/expr/v1alpha1:syntax_proto", ], ) ================================================ FILE: api/envoy/config/rbac/v4alpha/rbac.proto ================================================ syntax = "proto3"; package envoy.config.rbac.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/matcher/v4alpha/path.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "google/api/expr/v1alpha1/checked.proto"; import "google/api/expr/v1alpha1/syntax.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.rbac.v4alpha"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Role Based Access Control (RBAC)] // Role Based Access Control (RBAC) provides service-level and method-level access 
control for a // service. RBAC policies are additive. The policies are examined in order. Requests are allowed // or denied based on the `action` and whether a matching policy is found. For instance, if the // action is ALLOW and a matching policy is found the request should be allowed. // // RBAC can also be used to make access logging decisions by communicating with access loggers // through dynamic metadata. When the action is LOG and at least one policy matches, the // `access_log_hint` value in the shared key namespace 'envoy.common' is set to `true` indicating // the request should be logged. // // Here is an example of RBAC configuration. It has two policies: // // * Service account "cluster.local/ns/default/sa/admin" has full access to the service, and so // does "cluster.local/ns/default/sa/superuser". // // * Any user can read ("GET") the service at paths with prefix "/products", so long as the // destination port is either 80 or 443. // // .. code-block:: yaml // // action: ALLOW // policies: // "service-admin": // permissions: // - any: true // principals: // - authenticated: // principal_name: // exact: "cluster.local/ns/default/sa/admin" // - authenticated: // principal_name: // exact: "cluster.local/ns/default/sa/superuser" // "product-viewer": // permissions: // - and_rules: // rules: // - header: { name: ":method", exact_match: "GET" } // - url_path: // path: { prefix: "/products" } // - or_rules: // rules: // - destination_port: 80 // - destination_port: 443 // principals: // - any: true // message RBAC { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.RBAC"; // Should we do safe-list or block-list style access control? enum Action { // The policies grant access to principals. The rest are denied. This is safe-list style // access control. This is the default type. ALLOW = 0; // The policies deny access to principals. The rest are allowed. This is block-list style // access control. 
DENY = 1; // The policies set the `access_log_hint` dynamic metadata key based on if requests match. // All requests are allowed. LOG = 2; } // The action to take if a policy matches. Every action either allows or denies a request, // and can also carry out action-specific operations. // // Actions: // // * ALLOW: Allows the request if and only if there is a policy that matches // the request. // * DENY: Allows the request if and only if there are no policies that // match the request. // * LOG: Allows all requests. If at least one policy matches, the dynamic // metadata key `access_log_hint` is set to the value `true` under the shared // key namespace 'envoy.common'. If no policies match, it is set to `false`. // Other actions do not modify this key. // Action action = 1 [(validate.rules).enum = {defined_only: true}]; // Maps from policy name to policy. A match occurs when at least one policy matches the request. map policies = 2; } // Policy specifies a role and the principals that are assigned/denied the role. // A policy matches if and only if at least one of its permissions match the // action taking place AND at least one of its principals match the downstream // AND the condition is true if specified. message Policy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Policy"; // Required. The set of permissions that define a role. Each permission is // matched with OR semantics. To match all actions for this policy, a single // Permission with the `any` field set to true should be used. repeated Permission permissions = 1 [(validate.rules).repeated = {min_items: 1}]; // Required. The set of principals that are assigned/denied the role based on // “action”. Each principal is matched with OR semantics. To match all // downstreams for this policy, a single Principal with the `any` field set to // true should be used. 
repeated Principal principals = 2 [(validate.rules).repeated = {min_items: 1}]; oneof expression_specifier { // An optional symbolic expression specifying an access control // :ref:`condition `. The condition is combined // with the permissions and the principals as a clause with AND semantics. // Only be used when checked_condition is not used. google.api.expr.v1alpha1.Expr condition = 3; // [#not-implemented-hide:] // An optional symbolic expression that has been successfully type checked. // Only be used when condition is not used. google.api.expr.v1alpha1.CheckedExpr checked_condition = 4; } } // Permission defines an action (or actions) that a principal can take. // [#next-free-field: 11] message Permission { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Permission"; // Used in the `and_rules` and `or_rules` fields in the `rule` oneof. Depending on the context, // each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Permission.Set"; repeated Permission rules = 1 [(validate.rules).repeated = {min_items: 1}]; } oneof rule { option (validate.required) = true; // A set of rules that all must match in order to define the action. Set and_rules = 1; // A set of rules where at least one must match in order to define the action. Set or_rules = 2; // When any is set, it matches any action. bool any = 3 [(validate.rules).bool = {const: true}]; // A header (or pseudo-header such as :path or :method) on the incoming HTTP request. Only // available for HTTP request. // Note: the pseudo-header :path includes the query and fragment string. Use the `url_path` // field if you want to match the URL path without the query and fragment string. route.v4alpha.HeaderMatcher header = 4; // A URL path on the incoming HTTP request. Only available for HTTP. type.matcher.v4alpha.PathMatcher url_path = 10; // A CIDR block that describes the destination IP. 
core.v4alpha.CidrRange destination_ip = 5; // A port number that describes the destination port connecting to. uint32 destination_port = 6 [(validate.rules).uint32 = {lte: 65535}]; // Metadata that describes additional information about the action. type.matcher.v4alpha.MetadataMatcher metadata = 7; // Negates matching the provided permission. For instance, if the value of // `not_rule` would match, this permission would not match. Conversely, if // the value of `not_rule` would not match, this permission would match. Permission not_rule = 8; // The request server from the client's connection request. This is // typically TLS SNI. // // .. attention:: // // The behavior of this field may be affected by how Envoy is configured // as explained below. // // * If the :ref:`TLS Inspector ` // filter is not added, and if a `FilterChainMatch` is not defined for // the :ref:`server name // `, // a TLS connection's requested SNI server name will be treated as if it // wasn't present. // // * A :ref:`listener filter ` may // overwrite a connection's requested server name within Envoy. // // Please refer to :ref:`this FAQ entry ` to learn to // setup SNI. type.matcher.v4alpha.StringMatcher requested_server_name = 9; } } // Principal defines an identity or a group of identities for a downstream // subject. // [#next-free-field: 12] message Principal { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal"; // Used in the `and_ids` and `or_ids` fields in the `identifier` oneof. // Depending on the context, each are applied with the associated behavior. message Set { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal.Set"; repeated Principal ids = 1 [(validate.rules).repeated = {min_items: 1}]; } // Authentication attributes for a downstream. 
message Authenticated { option (udpa.annotations.versioning).previous_message_type = "envoy.config.rbac.v3.Principal.Authenticated"; reserved 1; // The name of the principal. If set, The URI SAN or DNS SAN in that order // is used from the certificate, otherwise the subject field is used. If // unset, it applies to any user that is authenticated. type.matcher.v4alpha.StringMatcher principal_name = 2; } reserved 5; reserved "source_ip"; oneof identifier { option (validate.required) = true; // A set of identifiers that all must match in order to define the // downstream. Set and_ids = 1; // A set of identifiers at least one must match in order to define the // downstream. Set or_ids = 2; // When any is set, it matches any downstream. bool any = 3 [(validate.rules).bool = {const: true}]; // Authenticated attributes that identify the downstream. Authenticated authenticated = 4; // A CIDR block that describes the downstream remote/origin address. // Note: This is always the physical peer even if the // :ref:`remote_ip ` is // inferred from for example the x-forwarder-for header, proxy protocol, // etc. core.v4alpha.CidrRange direct_remote_ip = 10; // A CIDR block that describes the downstream remote/origin address. // Note: This may not be the physical peer and could be different from the // :ref:`direct_remote_ip // `. E.g, if the // remote ip is inferred from for example the x-forwarder-for header, proxy // protocol, etc. core.v4alpha.CidrRange remote_ip = 11; // A header (or pseudo-header such as :path or :method) on the incoming HTTP // request. Only available for HTTP request. Note: the pseudo-header :path // includes the query and fragment string. Use the `url_path` field if you // want to match the URL path without the query and fragment string. route.v4alpha.HeaderMatcher header = 6; // A URL path on the incoming HTTP request. Only available for HTTP. 
type.matcher.v4alpha.PathMatcher url_path = 9; // Metadata that describes additional information about the principal. type.matcher.v4alpha.MetadataMatcher metadata = 7; // Negates matching the provided principal. For instance, if the value of // `not_id` would match, this principal would not match. Conversely, if the // value of `not_id` would not match, this principal would match. Principal not_id = 8; } } ================================================ FILE: api/envoy/config/resource_monitor/fixed_heap/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/resource_monitor/fixed_heap/v2alpha/fixed_heap.proto ================================================ syntax = "proto3"; package envoy.config.resource_monitor.fixed_heap.v2alpha; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.resource_monitor.fixed_heap.v2alpha"; option java_outer_classname = "FixedHeapProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Fixed heap] // [#extension: envoy.resource_monitors.fixed_heap] // The fixed heap resource monitor reports the Envoy process memory pressure, computed as a // fraction of currently reserved heap memory divided by a statically configured maximum // specified in the FixedHeapConfig. message FixedHeapConfig { uint64 max_heap_size_bytes = 1 [(validate.rules).uint64 = {gt: 0}]; } ================================================ FILE: api/envoy/config/resource_monitor/injected_resource/v2alpha/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/config/resource_monitor/injected_resource/v2alpha/injected_resource.proto
================================================

syntax = "proto3";

package envoy.config.resource_monitor.injected_resource.v2alpha;

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.resource_monitor.injected_resource.v2alpha";
option java_outer_classname = "InjectedResourceProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Injected resource]
// [#extension: envoy.resource_monitors.injected_resource]

// The injected resource monitor allows injecting a synthetic resource pressure into Envoy
// via a text file, which must contain a floating-point number in the range [0..1] representing
// the resource pressure and be updated atomically by a symbolic link swap.
// This is intended primarily for integration tests to force Envoy into an overloaded state.
message InjectedResourceConfig {
  // Path of the file that holds the injected pressure value. Must be a
  // non-empty string.
  string filename = 1 [(validate.rules).string = {min_bytes: 1}];
}

================================================
FILE: api/envoy/config/retry/omit_canary_hosts/v2/BUILD
================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/config/retry/omit_canary_hosts/v2/omit_canary_hosts.proto
================================================

syntax = "proto3";

package envoy.config.retry.omit_canary_hosts.v2;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.config.retry.omit_canary_hosts.v2";
option java_outer_classname = "OmitCanaryHostsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Omit Canary Hosts Predicate]
// [#extension: envoy.retry_host_predicates.omit_canary_hosts]

// Configuration message for the omit_canary_hosts retry host predicate.
// This predicate takes no options.
message OmitCanaryHostsPredicate {
}

================================================
FILE: api/envoy/config/retry/omit_host_metadata/v2/BUILD
================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2/core:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/config/retry/omit_host_metadata/v2/omit_host_metadata_config.proto
================================================

syntax = "proto3";

package envoy.config.retry.omit_host_metadata.v2;

import "envoy/api/v2/core/base.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.config.retry.omit_host_metadata.v2";
option java_outer_classname = "OmitHostMetadataConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_migrate).move_to_package =
    "envoy.extensions.retry.host.omit_host_metadata.v3";
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Omit host metadata retry predicate]
// A retry host predicate that can be used to reject a host based on
// predefined metadata match criteria.
// [#extension: envoy.retry_host_predicates.omit_host_metadata]

message OmitHostMetadataConfig {
  // Retry host predicate metadata match criteria. The hosts in
  // the upstream cluster with matching metadata will be omitted while
  // attempting a retry of a failed request. The metadata should be specified
  // under the *envoy.lb* key.
  api.v2.core.Metadata metadata_match = 1;
}

================================================
FILE: api/envoy/config/retry/previous_hosts/v2/BUILD
================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/config/retry/previous_hosts/v2/previous_hosts.proto
================================================

syntax = "proto3";

package envoy.config.retry.previous_hosts.v2;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.config.retry.previous_hosts.v2";
option java_outer_classname = "PreviousHostsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Previous Hosts Predicate]
// [#extension: envoy.retry_host_predicates.previous_hosts]

// Configuration message for the previous_hosts retry host predicate.
// This predicate takes no options.
message PreviousHostsPredicate {
}

================================================
FILE: api/envoy/config/retry/previous_priorities/BUILD
================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/config/retry/previous_priorities/previous_priorities_config.proto
================================================

syntax = "proto3";

package envoy.config.retry.previous_priorities;

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.retry.previous_priorities";
option java_outer_classname = "PreviousPrioritiesConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Previous priorities retry selector]
// A retry host selector that attempts to spread retries between priorities, even if certain
// priorities would not normally be attempted due to higher priorities being available.
//
// As priorities get excluded, load will be distributed amongst the remaining healthy priorities
// based on the relative health of the priorities, matching how load is distributed during regular
// host selection. For example, given priority healths of {100, 50, 50}, the original load will be
// {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). If P0 is excluded, the load
// changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the
// remaining to spill over to P2.
//
// Each priority attempted will be excluded until there are no healthy priorities left, at which
// point the list of attempted priorities will be reset, essentially starting from the beginning.
// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the
// following sequence of priorities would be selected (assuming update_frequency = 1):
// Attempt 1: P0 (P0 is 100% healthy)
// Attempt 2: P2 (P0 already attempted, P2 only healthy priority)
// Attempt 3: P0 (no healthy priorities, reset)
// Attempt 4: P2
//
// In the case of all upstream hosts being unhealthy, no adjustments will be made to the original
// priority load, so behavior should be identical to not using this plugin.
//
// Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of
// priorities), which might incur significant overhead for clusters with many priorities.
// [#extension: envoy.retry_priorities.previous_priorities]

message PreviousPrioritiesConfig {
  // How often the priority load should be updated based on previously attempted priorities. Useful
  // to allow each priority to receive more than one request before being excluded or to reduce
  // the number of times that the priority load has to be recomputed.
  //
  // For example, by setting this to 2, then the first two attempts (initial attempt and first
  // retry) will use the unmodified priority load. The third and fourth attempt will use priority
  // load which excludes the priorities routed to with the first two attempts, and the fifth and
  // sixth attempt will use the priority load excluding the priorities used for the first four
  // attempts.
  //
  // Must be greater than 0.
  int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}];
}

================================================
FILE: api/envoy/config/route/v3/BUILD
================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/api/v2:pkg",
        "//envoy/api/v2/route:pkg",
        "//envoy/config/core/v3:pkg",
        "//envoy/type/matcher/v3:pkg",
        "//envoy/type/metadata/v3:pkg",
        "//envoy/type/tracing/v3:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/config/route/v3/route.proto
================================================

syntax = "proto3";

package envoy.config.route.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/config_source.proto";
import "envoy/config/route/v3/route_components.proto";

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.route.v3";
option java_outer_classname = "RouteProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: HTTP route configuration]
// * Routing :ref:`architecture overview `
// * HTTP :ref:`router filter `

// [#next-free-field: 11]
message RouteConfiguration {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RouteConfiguration";

  // The name of the route configuration. For example, it might match
  // :ref:`route_config_name
  // ` in
  // :ref:`envoy_api_msg_extensions.filters.network.http_connection_manager.v3.Rds`.
  string name = 1;

  // An array of virtual hosts that make up the route table.
  repeated VirtualHost virtual_hosts = 2;

  // An array of virtual hosts will be dynamically loaded via the VHDS API.
  // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used
  // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for
  // on-demand discovery of virtual hosts. The contents of these two fields will be merged to
  // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration
  // taking precedence.
  Vhds vhds = 9;

  // Optionally specifies a list of HTTP headers that the connection manager
  // will consider to be internal only. If they are found on external requests they will be cleaned
  // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more
  // information.
  repeated string internal_only_headers = 3 [
    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}
  ];

  // Specifies a list of HTTP headers that should be added to each response that
  // the connection manager encodes. Headers specified at this level are applied
  // after headers from any enclosed :ref:`envoy_api_msg_config.route.v3.VirtualHost` or
  // :ref:`envoy_api_msg_config.route.v3.RouteAction`. For more information, including details on
  // header value syntax, see the documentation on :ref:`custom request headers
  // `.
  repeated core.v3.HeaderValueOption response_headers_to_add = 4
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each response
  // that the connection manager encodes.
  repeated string response_headers_to_remove = 5 [
    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}
  ];

  // Specifies a list of HTTP headers that should be added to each request
  // routed by the HTTP connection manager. Headers specified at this level are
  // applied after headers from any enclosed :ref:`envoy_api_msg_config.route.v3.VirtualHost` or
  // :ref:`envoy_api_msg_config.route.v3.RouteAction`. For more information, including details on
  // header value syntax, see the documentation on :ref:`custom request headers
  // `.
  repeated core.v3.HeaderValueOption request_headers_to_add = 6
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each request
  // routed by the HTTP connection manager.
  repeated string request_headers_to_remove = 8 [
    (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}}
  ];

  // By default, headers that should be added/removed are evaluated from most to least specific:
  //
  // * route level
  // * virtual host level
  // * connection manager level
  //
  // To allow setting overrides at the route or virtual host level, this order can be reversed
  // by setting this option to true. Defaults to false.
  //
  // [#next-major-version: In the v3 API, this will default to true.]
  bool most_specific_header_mutations_wins = 10;

  // An optional boolean that specifies whether the clusters that the route
  // table refers to will be validated by the cluster manager. If set to true
  // and a route refers to a non-existent cluster, the route table will not
  // load. If set to false and a route refers to a non-existent cluster, the
  // route table will load and the router filter will return a 404 if the route
  // is selected at runtime. This setting defaults to true if the route table
  // is statically defined via the :ref:`route_config
  // `
  // option. This setting defaults to false if the route table is loaded dynamically via the
  // :ref:`rds
  // `
  // option. Users may wish to override the default behavior in certain cases (for example when
  // using CDS with a static route table).
  google.protobuf.BoolValue validate_clusters = 7;
}

message Vhds {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Vhds";

  // Configuration source specifier for VHDS.
  core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];
}

================================================
FILE: api/envoy/config/route/v3/route_components.proto
================================================

syntax = "proto3";

package envoy.config.route.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/extension.proto";
import "envoy/config/core/v3/proxy_protocol.proto";
import "envoy/type/matcher/v3/regex.proto";
import "envoy/type/matcher/v3/string.proto";
import "envoy/type/metadata/v3/metadata.proto";
import "envoy/type/tracing/v3/custom_tag.proto";
import "envoy/type/v3/percent.proto";
import "envoy/type/v3/range.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";
import "google/protobuf/wrappers.proto";

import "envoy/annotations/deprecation.proto";
import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.route.v3";
option java_outer_classname = "RouteComponentsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: HTTP route components]
// * Routing :ref:`architecture overview `
// * HTTP :ref:`router filter `

// The top level element in the routing configuration is a virtual host. Each virtual host has
// a logical name as well as a set of domains that get routed to it based on the incoming request's
// host header. This allows a single listener to service multiple top level domain path trees. Once
// a virtual host is selected based on the domain, the routes are processed in order to see which
// upstream cluster to route to or whether to perform a redirect.
// [#next-free-field: 21]
message VirtualHost {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualHost";

  enum TlsRequirementType {
    // No TLS requirement for the virtual host.
    NONE = 0;

    // External requests must use TLS. If a request is external and it is not
    // using TLS, a 301 redirect will be sent telling the client to use HTTPS.
    EXTERNAL_ONLY = 1;

    // All requests must use TLS. If a request is not using TLS, a 301 redirect
    // will be sent telling the client to use HTTPS.
    ALL = 2;
  }

  reserved 9, 12;

  reserved "per_filter_config";

  // The logical name of the virtual host. This is used when emitting certain
  // statistics but is not relevant for routing.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // A list of domains (host/authority header) that will be matched to this
  // virtual host. Wildcard hosts are supported in the suffix or prefix form.
  //
  // Domain search order:
  // 1. Exact domain names: ``www.foo.com``.
  // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``.
  // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``.
  // 4. Special wildcard ``*`` matching any domain.
  //
  // .. note::
  //
  //   The wildcard will not match the empty string.
  //   e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``.
  //   The longest wildcards match first.
  //   Only a single virtual host in the entire route configuration can match on ``*``. A domain
  //   must be unique across all virtual hosts or the config will fail to load.
  //
  // Domains cannot contain control characters. This is validated by the well_known_regex HTTP_HEADER_VALUE.
  repeated string domains = 2 [(validate.rules).repeated = {
    min_items: 1
    items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}}
  }];

  // The list of routes that will be matched, in order, for incoming requests.
  // The first route that matches will be used.
  repeated Route routes = 3;

  // Specifies the type of TLS enforcement the virtual host expects. If this option is not
  // specified, there is no TLS requirement for the virtual host.
  TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}];

  // A list of virtual clusters defined for this virtual host. Virtual clusters
  // are used for additional statistics gathering.
  repeated VirtualCluster virtual_clusters = 5;

  // Specifies a set of rate limit configurations that will be applied to the
  // virtual host.
  repeated RateLimit rate_limits = 6;

  // Specifies a list of HTTP headers that should be added to each request
  // handled by this virtual host. Headers specified at this level are applied
  // after headers from enclosed :ref:`envoy_api_msg_config.route.v3.Route` and before headers from the
  // enclosing :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including
  // details on header value syntax, see the documentation on :ref:`custom request headers
  // `.
  repeated core.v3.HeaderValueOption request_headers_to_add = 7
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each request
  // handled by this virtual host.
  repeated string request_headers_to_remove = 13 [(validate.rules).repeated = {
    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
  }];

  // Specifies a list of HTTP headers that should be added to each response
  // handled by this virtual host. Headers specified at this level are applied
  // after headers from enclosed :ref:`envoy_api_msg_config.route.v3.Route` and before headers from the
  // enclosing :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including
  // details on header value syntax, see the documentation on :ref:`custom request headers
  // `.
  repeated core.v3.HeaderValueOption response_headers_to_add = 10
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each response
  // handled by this virtual host.
  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {
    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
  }];

  // Indicates that the virtual host has a CORS policy.
  CorsPolicy cors = 8;

  // The per_filter_config field can be used to provide virtual host-specific
  // configurations for filters. The key should match the filter name, such as
  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter
  // specific; see the :ref:`HTTP filter documentation `
  // for if and how it is utilized.
  // NOTE(review): the map's type parameters appear to have been dropped by the text
  // extraction; upstream this is presumably map<string, google.protobuf.Any> — confirm
  // against the original file before relying on this declaration.
  map typed_per_filter_config = 15;

  // Decides whether the :ref:`x-envoy-attempt-count
  // ` header should be included
  // in the upstream request. Setting this option will cause it to override any existing header
  // value, so in the case of two Envoys on the request path with this option enabled, the upstream
  // will see the attempt count as perceived by the second Envoy. Defaults to false.
  // This header is unaffected by the
  // :ref:`suppress_envoy_headers
  // ` flag.
  //
  // [#next-major-version: rename to include_attempt_count_in_request.]
  bool include_request_attempt_count = 14;

  // Decides whether the :ref:`x-envoy-attempt-count
  // ` header should be included
  // in the downstream response. Setting this option will cause the router to override any existing header
  // value, so in the case of two Envoys on the request path with this option enabled, the downstream
  // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false.
  // This header is unaffected by the
  // :ref:`suppress_envoy_headers
  // ` flag.
  bool include_attempt_count_in_response = 19;

  // Indicates the retry policy for all routes in this virtual host. Note that setting a
  // route level entry will take precedence over this config and it'll be treated
  // independently (e.g.: values are not inherited).
  RetryPolicy retry_policy = 16;

  // [#not-implemented-hide:]
  // Specifies the configuration for retry policy extension. Note that setting a route level entry
  // will take precedence over this config and it'll be treated independently (e.g.: values are not
  // inherited). :ref:`Retry policy ` should not be
  // set if this field is used.
  google.protobuf.Any retry_policy_typed_config = 20;

  // Indicates the hedge policy for all routes in this virtual host. Note that setting a
  // route level entry will take precedence over this config and it'll be treated
  // independently (e.g.: values are not inherited).
  HedgePolicy hedge_policy = 17;

  // The maximum bytes which will be buffered for retries and shadowing.
  // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum
  // value of this and the listener per_connection_buffer_limit_bytes.
  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18;
}

// A filter-defined action type.
message FilterAction {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.FilterAction";

  google.protobuf.Any action = 1;
}

// A route is both a specification of how to match a request as well as an indication of what to do
// next (e.g., redirect, forward, rewrite, etc.).
//
// .. attention::
//
//   Envoy supports routing on HTTP method via :ref:`header matching
//   `.
// [#next-free-field: 18]
message Route {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Route";

  reserved 6, 8;

  reserved "per_filter_config";

  // Name for the route.
  string name = 14;

  // Route matching parameters.
  RouteMatch match = 1 [(validate.rules).message = {required: true}];

  oneof action {
    option (validate.required) = true;

    // Route request to some upstream cluster.
    RouteAction route = 2;

    // Return a redirect.
    RedirectAction redirect = 3;

    // Return an arbitrary HTTP response directly, without proxying.
    DirectResponseAction direct_response = 7;

    // [#not-implemented-hide:]
    // If true, a filter will define the action (e.g., it could dynamically generate the
    // RouteAction).
    // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when
    // implemented]
    FilterAction filter_action = 17;
  }

  // The Metadata field can be used to provide additional information
  // about the route. It can be used for configuration, stats, and logging.
  // The metadata should go under the filter namespace that will need it.
  // For instance, if the metadata is intended for the Router filter,
  // the filter name should be specified as *envoy.filters.http.router*.
  core.v3.Metadata metadata = 4;

  // Decorator for the matched route.
  Decorator decorator = 5;

  // The typed_per_filter_config field can be used to provide route-specific
  // configurations for filters. The key should match the filter name, such as
  // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter
  // specific; see the :ref:`HTTP filter documentation ` for
  // if and how it is utilized.
  // NOTE(review): map type parameters lost in extraction; presumably
  // map<string, google.protobuf.Any> — confirm against the original file.
  map typed_per_filter_config = 13;

  // Specifies a set of headers that will be added to requests matching this
  // route. Headers specified at this level are applied before headers from the
  // enclosing :ref:`envoy_api_msg_config.route.v3.VirtualHost` and
  // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on
  // header value syntax, see the documentation on :ref:`custom request headers
  // `.
  repeated core.v3.HeaderValueOption request_headers_to_add = 9
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each request
  // matching this route.
  repeated string request_headers_to_remove = 12 [(validate.rules).repeated = {
    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
  }];

  // Specifies a set of headers that will be added to responses to requests
  // matching this route. Headers specified at this level are applied before
  // headers from the enclosing :ref:`envoy_api_msg_config.route.v3.VirtualHost` and
  // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including
  // details on header value syntax, see the documentation on
  // :ref:`custom request headers `.
  repeated core.v3.HeaderValueOption response_headers_to_add = 10
      [(validate.rules).repeated = {max_items: 1000}];

  // Specifies a list of HTTP headers that should be removed from each response
  // to requests matching this route.
  repeated string response_headers_to_remove = 11 [(validate.rules).repeated = {
    items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}}
  }];

  // Presence of the object defines whether the connection manager's tracing configuration
  // is overridden by this route specific instance.
  Tracing tracing = 15;

  // The maximum bytes which will be buffered for retries and shadowing.
  // If set, the bytes actually buffered will be the minimum value of this and the
  // listener per_connection_buffer_limit_bytes.
  google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16;
}

// Compared to the :ref:`cluster ` field that specifies a
// single upstream cluster as the target of a request, the :ref:`weighted_clusters
// ` option allows for specification of
// multiple upstream clusters along with weights that indicate the percentage of
// traffic to be forwarded to each cluster. The router selects an upstream cluster based on the
// weights.
message WeightedCluster {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.WeightedCluster";

  // [#next-free-field: 11]
  message ClusterWeight {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.route.WeightedCluster.ClusterWeight";

    reserved 7, 8;

    reserved "per_filter_config";

    // Name of the upstream cluster. The cluster must exist in the
    // :ref:`cluster manager configuration `.
    string name = 1 [(validate.rules).string = {min_len: 1}];

    // An integer between 0 and :ref:`total_weight
    // `. When a request matches the route,
    // the choice of an upstream cluster is determined by its weight. The sum of weights across all
    // entries in the clusters array must add up to the total_weight, which defaults to 100.
    google.protobuf.UInt32Value weight = 2;

    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in
    // the upstream cluster with metadata matching what is set in this field will be considered for
    // load balancing. Note that this will be merged with what's provided in
    // :ref:`RouteAction.metadata_match `, with
    // values here taking precedence. The filter name should be specified as *envoy.lb*.
    core.v3.Metadata metadata_match = 3;

    // Specifies a list of headers to be added to requests when this cluster is selected
    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.
    // Headers specified at this level are applied before headers from the enclosing
    // :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.VirtualHost`, and
    // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on
    // header value syntax, see the documentation on :ref:`custom request headers
    // `.
    repeated core.v3.HeaderValueOption request_headers_to_add = 4
        [(validate.rules).repeated = {max_items: 1000}];

    // Specifies a list of HTTP headers that should be removed from each request when
    // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.
    repeated string request_headers_to_remove = 9 [(validate.rules).repeated = {
      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
    }];

    // Specifies a list of headers to be added to responses when this cluster is selected
    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.
    // Headers specified at this level are applied before headers from the enclosing
    // :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.VirtualHost`, and
    // :ref:`envoy_api_msg_config.route.v3.RouteConfiguration`. For more information, including details on
    // header value syntax, see the documentation on :ref:`custom request headers
    // `.
    repeated core.v3.HeaderValueOption response_headers_to_add = 5
        [(validate.rules).repeated = {max_items: 1000}];

    // Specifies a list of headers to be removed from responses when this cluster is selected
    // through the enclosing :ref:`envoy_api_msg_config.route.v3.RouteAction`.
    repeated string response_headers_to_remove = 6 [(validate.rules).repeated = {
      items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}
    }];

    // The per_filter_config field can be used to provide weighted cluster-specific
    // configurations for filters. The key should match the filter name, such as
    // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter
    // specific; see the :ref:`HTTP filter documentation `
    // for if and how it is utilized.
    // NOTE(review): map type parameters lost in extraction; presumably
    // map<string, google.protobuf.Any> — confirm against the original file.
    map typed_per_filter_config = 10;
  }

  // Specifies one or more upstream clusters associated with the route.
  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];

  // Specifies the total weight across all clusters. The sum of all cluster weights must equal this
  // value, which must be greater than 0. Defaults to 100.
  google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}];

  // Specifies the runtime key prefix that should be used to construct the
  // runtime keys associated with each cluster. When the *runtime_key_prefix* is
  // specified, the router will look for weights associated with each upstream
  // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where
  // *cluster[i]* denotes an entry in the clusters array field. If the runtime
  // key for the cluster does not exist, the value specified in the
  // configuration file will be used as the default weight. See the :ref:`runtime documentation
  // ` for how key names map to the underlying implementation.
  string runtime_key_prefix = 2;
}

// [#next-free-field: 13]
message RouteMatch {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteMatch";

  message GrpcRouteMatchOptions {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.route.RouteMatch.GrpcRouteMatchOptions";
  }

  message TlsContextMatchOptions {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.route.RouteMatch.TlsContextMatchOptions";

    // If specified, the route will match against whether or not a certificate is presented.
    // If not specified, certificate presentation status (true or false) will not be considered when route matching.
    google.protobuf.BoolValue presented = 1;

    // If specified, the route will match against whether or not a certificate is validated.
    // If not specified, certificate validation status (true or false) will not be considered when route matching.
    google.protobuf.BoolValue validated = 2;
  }

  // An extensible message for matching CONNECT requests.
  message ConnectMatcher {
  }

  reserved 5, 3;

  reserved "regex";

  oneof path_specifier {
    option (validate.required) = true;

    // If specified, the route is a prefix rule meaning that the prefix must
    // match the beginning of the *:path* header.
    string prefix = 1;

    // If specified, the route is an exact path rule meaning that the path must
    // exactly match the *:path* header once the query string is removed.
    string path = 2;

    // If specified, the route is a regular expression rule meaning that the
    // regex must match the *:path* header once the query string is removed. The entire path
    // (without the query string) must match the regex. The rule will not match if only a
    // subsequence of the *:path* header matches the regex.
// // [#next-major-version: In the v3 API we should redo how path specification works such // that we utilize StringMatcher, and additionally have consistent options around whether we // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive // to deprecate the existing options. We should even consider whether we want to do away with // path_specifier entirely and just rely on a set of header matchers which can already match // on :path, etc. The issue with that is it is unclear how to generically deal with query string // stripping. This needs more thought.] type.matcher.v3.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}]; // If this is used as the matcher, the matcher will only match CONNECT requests. // Note that this will not match HTTP/2 upgrade-style CONNECT requests // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style // upgrades. // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2, // where Extended CONNECT requests may have a path, the path matchers will work if // there is a path present. // Note that CONNECT support is currently considered alpha in Envoy. // [#comment:TODO(htuch): Replace the above comment with an alpha tag.] ConnectMatcher connect_matcher = 12; } // Indicates that prefix/path matching should be case sensitive. The default // is true. google.protobuf.BoolValue case_sensitive = 4; // Indicates that the route should additionally match on a runtime key. Every time the route // is considered for a match, it must also fall under the percentage of matches indicated by // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the // number is <= the value of the numerator N, or if the key is not present, the default // value, the router continues to evaluate the remaining match criteria.
A runtime_fraction // route configuration can be used to roll out route changes in a gradual manner without full // code/config deploys. Refer to the :ref:`traffic shifting // ` docs for additional documentation. // // .. note:: // // Parsing this field is implemented such that the runtime key's data may be represented // as a FractionalPercent proto represented as JSON/YAML and may also be represented as an // integer with the assumption that the value is an integral percentage out of 100. For // instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent // whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics. core.v3.RuntimeFractionalPercent runtime_fraction = 9; // Specifies a set of headers that the route should match on. The router will // check the request's headers against all the specified headers in the route // config. A match will happen if all the headers in the route are present in // the request with the same values (or based on presence if the value field // is not in the config). repeated HeaderMatcher headers = 6; // Specifies a set of URL query parameters on which the route should // match. The router will check the query string from the *path* header // against all the specified query parameters. If the number of specified // query parameters is nonzero, they all must match the *path* header's // query string for a match to occur. repeated QueryParameterMatcher query_parameters = 7; // If specified, only gRPC requests will be matched. The router will check // that the content-type header has an application/grpc or one of the various // application/grpc+ values. GrpcRouteMatchOptions grpc = 8; // If specified, the client tls context will be matched against the defined // match options.
// // [#next-major-version: unify with RBAC] TlsContextMatchOptions tls_context = 11; } // [#next-free-field: 12] message CorsPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.CorsPolicy"; reserved 1, 8, 7; reserved "allow_origin", "allow_origin_regex", "enabled"; // Specifies string patterns that match allowed origins. An origin is allowed if any of the // string matchers match. repeated type.matcher.v3.StringMatcher allow_origin_string_match = 11; // Specifies the content for the *access-control-allow-methods* header. string allow_methods = 2; // Specifies the content for the *access-control-allow-headers* header. string allow_headers = 3; // Specifies the content for the *access-control-expose-headers* header. string expose_headers = 4; // Specifies the content for the *access-control-max-age* header. string max_age = 5; // Specifies whether the resource allows credentials. google.protobuf.BoolValue allow_credentials = 6; oneof enabled_specifier { // Specifies the % of requests for which the CORS filter is enabled. // // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS // filter will be enabled for 100% of the requests. // // If :ref:`runtime_key ` is // specified, Envoy will lookup the runtime key to get the percentage of requests to filter. core.v3.RuntimeFractionalPercent filter_enabled = 9; } // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not // enforced. // // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those // fields have to explicitly disable the filter in order for this setting to take effect. // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate // and track the request's *Origin* to determine if it's valid but will not enforce any policies. 
core.v3.RuntimeFractionalPercent shadow_enabled = 10; } // [#next-free-field: 37] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction"; enum ClusterNotFoundResponseCode { // HTTP status code - 503 Service Unavailable. SERVICE_UNAVAILABLE = 0; // HTTP status code - 404 Not Found. NOT_FOUND = 1; } // Configures :ref:`internal redirect ` behavior. // [#next-major-version: remove this definition - it's defined in the InternalRedirectPolicy message.] enum InternalRedirectAction { option deprecated = true; PASS_THROUGH_INTERNAL_REDIRECT = 0; HANDLE_INTERNAL_REDIRECT = 1; } // The router is capable of shadowing traffic from one cluster to another. The current // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to // respond before returning the response from the primary cluster. All normal statistics are // collected for the shadow cluster making this feature useful for testing. // // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is // useful for logging. For example, *cluster1* becomes *cluster1-shadow*. // // .. note:: // // Shadowing will not be triggered if the primary cluster does not exist. message RequestMirrorPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.RequestMirrorPolicy"; reserved 2; reserved "runtime_key"; // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. string cluster = 1 [(validate.rules).string = {min_len: 1}]; // If not specified, all requests to the target cluster will be mirrored. // // If specified, this field takes precedence over the `runtime_key` field and requests must also // fall under the percentage of matches indicated by this field. // // For some fraction N/D, a random number in the range [0,D) is selected. 
If the // number is <= the value of the numerator N, or if the key is not present, the default // value, the request will be mirrored. core.v3.RuntimeFractionalPercent runtime_fraction = 3; // Determines if the trace span should be sampled. Defaults to true. google.protobuf.BoolValue trace_sampled = 4; } // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer // `. // [#next-free-field: 7] message HashPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.HashPolicy"; message Header { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.HashPolicy.Header"; // The name of the request header that will be used to obtain the hash // key. If the request header is not present, no hash will be produced. string header_name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // If specified, the request header value will be rewritten and used // to produce the hash key. type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 2; } // Envoy supports two types of cookie affinity: // // 1. Passive. Envoy takes a cookie that's present in the cookies header and // hashes on its value. // // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL) // on the first request from the client in its response to the client, // based on the endpoint the request gets sent to. The client then // presents this on the next and all subsequent requests. The hash of // this is sufficient to ensure these requests get sent to the same // endpoint. The cookie is generated by hashing the source and // destination ports and addresses so that multiple independent HTTP2 // streams on the same connection will independently receive the same // cookie, even if they arrive at the Envoy simultaneously. 
message Cookie { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.HashPolicy.Cookie"; // The name of the cookie that will be used to obtain the hash key. If the // cookie is not present and ttl below is not set, no hash will be // produced. string name = 1 [(validate.rules).string = {min_len: 1}]; // If specified, a cookie with the TTL will be generated if the cookie is // not present. If the TTL is present and zero, the generated cookie will // be a session cookie. google.protobuf.Duration ttl = 2; // The name of the path for the cookie. If no path is specified here, no path // will be set for the cookie. string path = 3; } message ConnectionProperties { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.HashPolicy.ConnectionProperties"; // Hash on source IP address. bool source_ip = 1; } message QueryParameter { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.HashPolicy.QueryParameter"; // The name of the URL query parameter that will be used to obtain the hash // key. If the parameter is not present, no hash will be produced. Query // parameter names are case-sensitive. string name = 1 [(validate.rules).string = {min_len: 1}]; } message FilterState { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.HashPolicy.FilterState"; // The name of the Object in the per-request filterState, which is an // Envoy::Http::Hashable object. If there is no data associated with the key, // or the stored object is not Envoy::Http::Hashable, no hash will be produced. string key = 1 [(validate.rules).string = {min_len: 1}]; } oneof policy_specifier { option (validate.required) = true; // Header hash policy. Header header = 1; // Cookie hash policy. Cookie cookie = 2; // Connection properties hash policy. ConnectionProperties connection_properties = 3; // Query parameter hash policy. 
QueryParameter query_parameter = 5; // Filter state hash policy. FilterState filter_state = 6; } // The flag that short-circuits the hash computing. This field provides a // 'fallback' style of configuration: "if a terminal policy doesn't work, // fallback to rest of the policy list", it saves time when the terminal // policy works. // // If true, and there is already a hash computed, ignore rest of the // list of hash polices. // For example, if the following hash methods are configured: // // ========= ======== // specifier terminal // ========= ======== // Header A true // Header B false // Header C false // ========= ======== // // The generateHash process ends if policy "header A" generates a hash, as // it's a terminal policy. bool terminal = 4; } // Allows enabling and disabling upgrades on a per-route basis. // This overrides any enabled/disabled upgrade filter chain specified in the // HttpConnectionManager // :ref:`upgrade_configs // ` // but does not affect any custom filter chain specified there. message UpgradeConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RouteAction.UpgradeConfig"; // Configuration for sending data upstream as a raw data payload. This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. message ConnectConfig { // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream. core.v3.ProxyProtocolConfig proxy_protocol_config = 1; } // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] will be proxied upstream. string upgrade_type = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Determines if upgrades are available on this route. Defaults to true. google.protobuf.BoolValue enabled = 2; // Configuration for sending data upstream as a raw data payload. 
This is used for // CONNECT requests, when forwarding CONNECT payload as raw TCP. // Note that CONNECT support is currently considered alpha in Envoy. // [#comment:TODO(htuch): Replace the above comment with an alpha tag.] ConnectConfig connect_config = 3; } message MaxStreamDuration { // Specifies the maximum duration allowed for streams on the route. If not specified, the value // from the :ref:`max_stream_duration // ` field in // :ref:`HttpConnectionManager.common_http_protocol_options // ` // is used. If this field is set explicitly to zero, any // HttpConnectionManager max_stream_duration timeout will be disabled for // this route. google.protobuf.Duration max_stream_duration = 1; // If present, and the request contains a `grpc-timeout header // `_, use that value as the // *max_stream_duration*, but limit the applied timeout to the maximum value specified here. // If set to 0, the `grpc-timeout` header is used without modification. google.protobuf.Duration grpc_timeout_header_max = 2; // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by // subtracting the provided duration from the header. This is useful for allowing Envoy to set // its global timeout to be less than that of the deadline imposed by the calling client, which // makes it more likely that Envoy will handle the timeout instead of having the call canceled // by the client. If, after applying the offset, the resulting timeout is zero or negative, // the stream will time out immediately. google.protobuf.Duration grpc_timeout_header_offset = 3; } reserved 12, 18, 19, 16, 22, 21, 10; reserved "request_mirror_policy"; oneof cluster_specifier { option (validate.required) = true; // Indicates the upstream cluster that the request should be routed // to. string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Envoy will determine the cluster to route to by reading the value of the // HTTP header named by cluster_header from the request headers.
If the // header is not found or the referenced cluster does not exist, Envoy will // return a 404 response. // // .. attention:: // // Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 // *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead. string cluster_header = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. See // :ref:`traffic splitting ` // for additional documentation. WeightedCluster weighted_clusters = 3; } // The HTTP status code to use when configured cluster is not found. // The default response code is 503 Service Unavailable. ClusterNotFoundResponseCode cluster_not_found_response_code = 20 [(validate.rules).enum = {defined_only: true}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what's set in this field will be considered // for load balancing. If using :ref:`weighted_clusters // `, metadata will be merged, with values // provided there taking precedence. The filter name should be specified as *envoy.lb*. core.v3.Metadata metadata_match = 4; // Indicates that during forwarding, the matched prefix (or path) should be // swapped with this value. This option allows application URLs to be rooted // at a different path from those exposed at the reverse proxy layer. The router filter will // place the original path before rewrite into the :ref:`x-envoy-original-path // ` header. // // Only one of *prefix_rewrite* or // :ref:`regex_rewrite ` // may be specified. // // .. attention:: // // Pay careful attention to the use of trailing slashes in the // :ref:`route's match ` prefix value. // Stripping a prefix from a path requires multiple Routes to handle all cases. 
For example, // rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single // :ref:`Route `, as shown by the below config entries: // // .. code-block:: yaml // // - match: // prefix: "/prefix/" // route: // prefix_rewrite: "/" // - match: // prefix: "/prefix" // route: // prefix_rewrite: "/" // // Having above entries in the config, requests to */prefix* will be stripped to */*, while // requests to */prefix/etc* will be stripped to */etc*. string prefix_rewrite = 5 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Indicates that during forwarding, portions of the path that match the // pattern should be rewritten, even allowing the substitution of capture // groups from the pattern into the new path as specified by the rewrite // substitution string. This is useful to allow application paths to be // rewritten in a way that is aware of segments with variable content like // identifiers. The router filter will place the original path as it was // before the rewrite into the :ref:`x-envoy-original-path // ` header. // // Only one of :ref:`prefix_rewrite ` // or *regex_rewrite* may be specified. // // Examples using Google's `RE2 `_ engine: // // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution // string of ``\2/instance/\1`` would transform ``/service/foo/v1/api`` // into ``/v1/api/instance/foo``. // // * The pattern ``one`` paired with a substitution string of ``two`` would // transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``. // // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of // ``\1two\2`` would replace only the first occurrence of ``one``, // transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``. // // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/`` // would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to // ``/aaa/yyy/bbb``. 
type.matcher.v3.RegexMatchAndSubstitute regex_rewrite = 32; oneof host_rewrite_specifier { // Indicates that during forwarding, the host header will be swapped with // this value. string host_rewrite_literal = 6 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Indicates that during forwarding, the host header will be swapped with // the hostname of the upstream host chosen by the cluster manager. This // option is applicable only when the destination cluster for a route is of // type *strict_dns* or *logical_dns*. Setting this to true with other cluster // types has no effect. google.protobuf.BoolValue auto_host_rewrite = 7; // Indicates that during forwarding, the host header will be swapped with the content of given // downstream or :ref:`custom ` header. // If header value is empty, host header is left intact. // // .. attention:: // // Pay attention to the potential security implications of using this option. Provided header // must come from trusted source. string host_rewrite_header = 29 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; // Indicates that during forwarding, the host header will be swapped with // the result of the regex substitution executed on path value with query and fragment removed. // This is useful for transitioning variable content between path segment and subdomain. // // For example with the following config: // // .. code-block:: yaml // // host_rewrite_path_regex: // pattern: // google_re2: {} // regex: "^/(.+)/.+$" // substitution: \1 // // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`. type.matcher.v3.RegexMatchAndSubstitute host_rewrite_path_regex = 35; } // Specifies the upstream timeout for the route. If not specified, the default is 15s. This // spans between the point at which the entire downstream request (i.e. end-of-stream) has been // processed and when the upstream response has been completely processed. 
A value of 0 will // disable the route's timeout. // // .. note:: // // This timeout includes all retries. See also // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. google.protobuf.Duration timeout = 8; // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout, // although the connection manager wide :ref:`stream_idle_timeout // ` // will still apply. A value of 0 will completely disable the route's idle timeout, even if a // connection manager stream idle timeout is configured. // // The idle timeout is distinct from :ref:`timeout // `, which provides an upper bound // on the upstream response time; :ref:`idle_timeout // ` instead bounds the amount // of time the request's stream may be idle. // // After header decoding, the idle timeout will apply on downstream and // upstream request events. Each time an encode/decode event for headers or // data is processed for the stream, the timer will be reset. If the timeout // fires, the stream is terminated with a 408 Request Timeout error code if no // upstream response header has been received, otherwise a stream reset // occurs. google.protobuf.Duration idle_timeout = 24; // Indicates that the route has a retry policy. Note that if this is set, // it'll take precedence over the virtual host level retry policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). RetryPolicy retry_policy = 9; // [#not-implemented-hide:] // Specifies the configuration for retry policy extension. Note that if this is set, it'll take // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged, // most internal one becomes the enforced policy). :ref:`Retry policy ` // should not be set if this field is used.
google.protobuf.Any retry_policy_typed_config = 33; // Indicates that the route has request mirroring policies. repeated RequestMirrorPolicy request_mirror_policies = 30; // Optionally specifies the :ref:`routing priority `. core.v3.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}]; // Specifies a set of rate limit configurations that could be applied to the // route. repeated RateLimit rate_limits = 13; // Specifies if the rate limit filter should include the virtual host rate // limits. By default, if the route configured rate limits, the virtual host // :ref:`rate_limits ` are not applied to the // request. // // This field is deprecated. Please use :ref:`vh_rate_limits ` google.protobuf.BoolValue include_vh_rate_limits = 14 [deprecated = true]; // Specifies a list of hash policies to use for ring hash load balancing. Each // hash policy is evaluated individually and the combined result is used to // route the request. The method of combination is deterministic such that // identical lists of hash policies will produce the same hash. Since a hash // policy examines specific parts of a request, it can fail to produce a hash // (i.e. if the hashed header is not present). If (and only if) all configured // hash policies fail to generate a hash, no hash will be produced for // the route. In this case, the behavior is the same as if no hash policies // were specified (i.e. the ring hash load balancer will choose a random // backend). If a hash policy has the "terminal" attribute set to true, and // there is already a hash generated, the hash is returned immediately, // ignoring the rest of the hash policy list. repeated HashPolicy hash_policy = 15; // Indicates that the route has a CORS policy. 
CorsPolicy cors = 17; // Deprecated by :ref:`grpc_timeout_header_max ` // If present, and the request is a gRPC request, use the // `grpc-timeout header `_, // or its default value (infinity) instead of // :ref:`timeout `, but limit the applied timeout // to the maximum value specified here. If configured as 0, the maximum allowed timeout for // gRPC requests is infinity. If not configured at all, the `grpc-timeout` header is not used // and gRPC requests time out like any other requests using // :ref:`timeout ` or its default. // This can be used to prevent unexpected upstream request timeouts due to potentially long // time gaps between gRPC request and response in gRPC streaming mode. // // .. note:: // // If a timeout is specified using :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, it takes // precedence over `grpc-timeout header `_, when // both are present. See also // :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`, // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the // :ref:`retry overview `. google.protobuf.Duration max_grpc_timeout = 23 [deprecated = true]; // Deprecated by :ref:`grpc_timeout_header_offset `. // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by subtracting // the provided duration from the header. This is useful in allowing Envoy to set its global // timeout to be less than that of the deadline imposed by the calling client, which makes it more // likely that Envoy will handle the timeout instead of having the call canceled by the client. // The offset will only be applied if the provided grpc_timeout is greater than the offset. This // ensures that the offset will only ever decrease the timeout and never set it to 0 (meaning // infinity). 
google.protobuf.Duration grpc_timeout_offset = 28 [deprecated = true]; repeated UpgradeConfig upgrade_configs = 25; // If present, Envoy will try to follow an upstream redirect response instead of proxying the // response back to the downstream. An upstream redirect response is defined // by :ref:`redirect_response_codes // `. InternalRedirectPolicy internal_redirect_policy = 34; InternalRedirectAction internal_redirect_action = 26 [deprecated = true]; // An internal redirect is handled, iff the number of previous internal redirects that a // downstream request has encountered is lower than this value, and // :ref:`internal_redirect_action ` // is set to :ref:`HANDLE_INTERNAL_REDIRECT // ` // In the case where a downstream request is bounced among multiple routes by internal redirect, // the first route that hits this threshold, or has // :ref:`internal_redirect_action ` // set to // :ref:`PASS_THROUGH_INTERNAL_REDIRECT // ` // will pass the redirect back to downstream. // // If not specified, at most one redirect will be followed. google.protobuf.UInt32Value max_internal_redirects = 31 [deprecated = true]; // Indicates that the route has a hedge policy. Note that if this is set, // it'll take precedence over the virtual host level hedge policy entirely // (e.g.: policies are not merged, most internal one becomes the enforced policy). HedgePolicy hedge_policy = 27; // Specifies the maximum stream duration for this route. MaxStreamDuration max_stream_duration = 36; } // HTTP retry :ref:`architecture overview `. 
// [#next-free-field: 12]
message RetryPolicy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RetryPolicy";

  // How a reset header's value is interpreted when computing the back-off interval.
  enum ResetHeaderFormat {
    // The header value is a delta, in seconds, to wait before retrying.
    SECONDS = 0;
    // The header value is an absolute Unix timestamp (seconds) at which to retry.
    UNIX_TIMESTAMP = 1;
  }

  // Extension point: a named RetryPriority plugin plus its typed configuration.
  message RetryPriority {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.route.RetryPolicy.RetryPriority";

    reserved 2;

    reserved "config";

    string name = 1 [(validate.rules).string = {min_len: 1}];

    oneof config_type {
      google.protobuf.Any typed_config = 3;
    }
  }

  // Extension point: a named RetryHostPredicate plugin plus its typed configuration.
  message RetryHostPredicate {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.route.RetryPolicy.RetryHostPredicate";

    reserved 2;

    reserved "config";

    string name = 1 [(validate.rules).string = {min_len: 1}];

    oneof config_type {
      google.protobuf.Any typed_config = 3;
    }
  }

  // Parameters for the default exponential retry back-off strategy.
  message RetryBackOff {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.route.RetryPolicy.RetryBackOff";

    // Specifies the base interval between retries. This parameter is required and must be greater
    // than zero. Values less than 1 ms are rounded up to 1 ms.
    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's
    // back-off algorithm.
    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {
      required: true
      gt {}
    }];

    // Specifies the maximum interval between retries. This parameter is optional, but must be
    // greater than or equal to the `base_interval` if set. The default is 10 times the
    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion
    // of Envoy's back-off algorithm.
    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];
  }

  // A single response header to consult for a rate-limit-driven back-off interval.
  message ResetHeader {
    // The name of the response header to look for (e.g. ``Retry-After``).
    string name = 1
        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

    // How the header value is interpreted: a delta in seconds, or a Unix timestamp.
    ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}];
  }

  // A retry back-off strategy that applies when the upstream server rate limits
  // the request.
  //
  // Given this configuration:
  //
  // .. code-block:: yaml
  //
  //   rate_limited_retry_back_off:
  //     reset_headers:
  //     - name: Retry-After
  //       format: SECONDS
  //     - name: X-RateLimit-Reset
  //       format: UNIX_TIMESTAMP
  //     max_interval: "300s"
  //
  // The following algorithm will apply:
  //
  // 1. If the response contains the header ``Retry-After`` its value must be of
  //    the form ``120`` (an integer that represents the number of seconds to
  //    wait before retrying). If so, this value is used as the back-off interval.
  // 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its
  //    value must be of the form ``1595320702`` (an integer that represents the
  //    point in time at which to retry, as a Unix timestamp in seconds). If so,
  //    the current time is subtracted from this value and the result is used as
  //    the back-off interval.
  // 3. Otherwise, Envoy will use the default
  //    :ref:`exponential back-off `
  //    strategy.
  //
  // No matter which format is used, if the resulting back-off interval exceeds
  // ``max_interval`` it is discarded and the next header in ``reset_headers``
  // is tried. If a request timeout is configured for the route it will further
  // limit how long the request will be allowed to run.
  //
  // To prevent many clients retrying at the same point in time jitter is added
  // to the back-off interval, so the resulting interval is decided by taking:
  // ``random(interval, interval * 1.5)``.
  //
  // .. attention::
  //
  //   Configuring ``rate_limited_retry_back_off`` will not by itself cause a request
  //   to be retried. You will still need to configure the right retry policy to match
  //   the responses from the upstream server.
  message RateLimitedRetryBackOff {
    // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``)
    // to match against the response. Headers are tried in order, and matched
    // case-insensitively. The first header to be parsed successfully is used. If no headers
    // match the default exponential back-off is used instead.
    repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}];

    // Specifies the maximum back off interval that Envoy will allow. If a reset
    // header contains an interval longer than this then it will be discarded and
    // the next header will be tried. Defaults to 300 seconds.
    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];
  }

  // Specifies the conditions under which retry takes place. These are the same
  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and
  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.
  string retry_on = 1;

  // Specifies the allowed number of retries. This parameter is optional and
  // defaults to 1. These are the same conditions documented for
  // :ref:`config_http_filters_router_x-envoy-max-retries`.
  google.protobuf.UInt32Value num_retries = 2
      [(udpa.annotations.field_migrate).rename = "max_retries"];

  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The
  // same conditions documented for
  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.
  //
  // .. note::
  //
  //   If left unspecified, Envoy will use the global
  //   :ref:`route timeout ` for the request.
  //   Consequently, when using a :ref:`5xx ` based
  //   retry policy, a request that times out will not be retried as the total timeout budget
  //   would have been exhausted.
  google.protobuf.Duration per_try_timeout = 3;

  // Specifies an implementation of a RetryPriority which is used to determine the
  // distribution of load across priorities used for retries. Refer to
  // :ref:`retry plugin configuration ` for more details.
  RetryPriority retry_priority = 4;

  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host
  // for retries. If any of the predicates reject the host, host selection will be reattempted.
  // Refer to :ref:`retry plugin configuration ` for more
  // details.
  repeated RetryHostPredicate retry_host_predicate = 5;

  // The maximum number of times host selection will be reattempted before giving up, at which
  // point the host that was last selected will be routed to. If unspecified, this will default to
  // retrying once.
  int64 host_selection_retry_max_attempts = 6;

  // HTTP status codes that should trigger a retry in addition to those specified by retry_on.
  repeated uint32 retriable_status_codes = 7;

  // Specifies parameters that control exponential retry back off. This parameter is optional, in
  // which case the default base interval is 25 milliseconds or, if set, the current value of the
  // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times
  // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries`
  // describes Envoy's back-off algorithm.
  RetryBackOff retry_back_off = 8;

  // Specifies parameters that control a retry back-off strategy that is used
  // when the request is rate limited by the upstream server. The server may
  // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to
  // provide feedback to the client on how long to wait before retrying. If
  // configured, this back-off strategy will be used instead of the
  // default exponential back off strategy (configured using `retry_back_off`)
  // whenever a response includes the matching headers.
  RateLimitedRetryBackOff rate_limited_retry_back_off = 11;

  // HTTP response headers that trigger a retry if present in the response. A retry will be
  // triggered if any of the header matches match the upstream response headers.
  // The field is only consulted if 'retriable-headers' retry policy is active.
  repeated HeaderMatcher retriable_headers = 9;

  // HTTP headers which must be present in the request for retries to be attempted.
  repeated HeaderMatcher retriable_request_headers = 10;
}

// HTTP request hedging :ref:`architecture overview `.
message HedgePolicy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HedgePolicy";

  // Specifies the number of initial requests that should be sent upstream.
  // Must be at least 1.
  // Defaults to 1.
  // [#not-implemented-hide:]
  google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}];

  // Specifies a probability that an additional upstream request should be sent
  // on top of what is specified by initial_requests.
  // Defaults to 0.
  // [#not-implemented-hide:]
  type.v3.FractionalPercent additional_request_chance = 2;

  // Indicates that a hedged request should be sent when the per-try timeout
  // is hit. This will only occur if the retry policy also indicates that a
  // timed out request should be retried.
  // Once a timed out request is retried due to per try timeout, the router
  // filter will ensure that it is not retried again even if the returned
  // response headers would otherwise be retried according the specified
  // :ref:`RetryPolicy `.
  // Defaults to false.
  bool hedge_on_per_try_timeout = 3;
}

// [#next-free-field: 9]
message RedirectAction {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RedirectAction";

  enum RedirectResponseCode {
    // Moved Permanently HTTP Status Code - 301.
    MOVED_PERMANENTLY = 0;

    // Found HTTP Status Code - 302.
    FOUND = 1;

    // See Other HTTP Status Code - 303.
    SEE_OTHER = 2;

    // Temporary Redirect HTTP Status Code - 307.
    TEMPORARY_REDIRECT = 3;

    // Permanent Redirect HTTP Status Code - 308.
    PERMANENT_REDIRECT = 4;
  }

  // When the scheme redirection takes place, the following rules apply:
  // 1. If the source URI scheme is `http` and the port is explicitly
  //    set to `:80`, the port will be removed after the redirection
  // 2. If the source URI scheme is `https` and the port is explicitly
  //    set to `:443`, the port will be removed after the redirection
  oneof scheme_rewrite_specifier {
    // The scheme portion of the URL will be swapped with "https".
    bool https_redirect = 4;

    // The scheme portion of the URL will be swapped with this value.
    string scheme_redirect = 7;
  }

  // The host portion of the URL will be swapped with this value.
  string host_redirect = 1
      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

  // The port value of the URL will be swapped with this value.
  uint32 port_redirect = 8;

  oneof path_rewrite_specifier {
    // The path portion of the URL will be swapped with this value.
    // Please note that query string in path_redirect will override the
    // request's query string and will not be stripped.
    //
    // For example, let's say we have the following routes:
    //
    // - match: { path: "/old-path-1" }
    //   redirect: { path_redirect: "/new-path-1" }
    // - match: { path: "/old-path-2" }
    //   redirect: { path_redirect: "/new-path-2", strip_query: "true" }
    // - match: { path: "/old-path-3" }
    //   redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" }
    //
    // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1"
    // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2"
    // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1"
    string path_redirect = 2
        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

    // Indicates that during redirection, the matched prefix (or path)
    // should be swapped with this value. This option allows redirect URLs be dynamically created
    // based on the request.
    //
    // .. attention::
    //
    //   Pay attention to the use of trailing slashes as mentioned in
    //   :ref:`RouteAction's prefix_rewrite `.
    string prefix_rewrite = 5
        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
  }

  // The HTTP status code to use in the redirect response. The default response
  // code is MOVED_PERMANENTLY (301).
  RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}];

  // Indicates that during redirection, the query portion of the URL will
  // be removed. Default value is false.
  bool strip_query = 6;
}

message DirectResponseAction {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.route.DirectResponseAction";

  // Specifies the HTTP response status to be returned.
  uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];

  // Specifies the content of the response body. If this setting is omitted,
  // no body is included in the generated response.
  //
  // .. note::
  //
  //   Headers can be specified using *response_headers_to_add* in the enclosing
  //   :ref:`envoy_api_msg_config.route.v3.Route`, :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` or
  //   :ref:`envoy_api_msg_config.route.v3.VirtualHost`.
  core.v3.DataSource body = 2;
}

message Decorator {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Decorator";

  // The operation name associated with the request matched to this route. If tracing is
  // enabled, this information will be used as the span name reported for this request.
  //
  // .. note::
  //
  //   For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden
  //   by the :ref:`x-envoy-decorator-operation
  //   ` header.
  string operation = 1 [(validate.rules).string = {min_len: 1}];

  // Whether the decorated details should be propagated to the other party. The default is true.
  google.protobuf.BoolValue propagate = 2;
}

message Tracing {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.Tracing";

  // Target percentage of requests managed by this HTTP connection manager that will be force
  // traced if the :ref:`x-client-trace-id `
  // header is set. This field is a direct analog for the runtime variable
  // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
  // `.
// Virtual cluster
// statistics are perfect in the sense that they are emitted on the downstream
// side such that they include network level failures.
//
// Documentation for :ref:`virtual cluster statistics `.
//
// .. note::
//
//   Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for
//   every application endpoint. This is both not easily maintainable and as well the matching and
//   statistics output are not free.
message VirtualCluster {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.VirtualCluster";

  reserved 1, 3;

  reserved "pattern", "method";

  // Specifies a list of header matchers to use for matching requests. Each specified header must
  // match. The pseudo-headers `:path` and `:method` can be used to match the request path and
  // method, respectively.
  repeated HeaderMatcher headers = 4;

  // Specifies the name of the virtual cluster. The virtual cluster name as well
  // as the virtual host name are used when emitting statistics. The statistics are emitted by the
  // router filter and are documented :ref:`here `.
  string name = 2 [(validate.rules).string = {min_len: 1}];
}

// Global rate limiting :ref:`architecture overview `.
message RateLimit {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.RateLimit";

  // [#next-free-field: 8]
  message Action {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.route.RateLimit.Action";

    // The following descriptor entry is appended to the descriptor:
    //
    // .. code-block:: cpp
    //
    //   ("source_cluster", "")
    //
    // is derived from the :option:`--service-cluster` option.
    message SourceCluster {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.api.v2.route.RateLimit.Action.SourceCluster";
    }

    // The following descriptor entry is appended to the descriptor:
    //
    // .. code-block:: cpp
    //
    //   ("destination_cluster", "")
    //
    // Once a request matches against a route table rule, a routed cluster is determined by one of
    // the following :ref:`route table configuration `
    // settings:
    //
    // * :ref:`cluster ` indicates the upstream cluster
    //   to route to.
    // * :ref:`weighted_clusters `
    //   chooses a cluster randomly from a set of clusters with attributed weight.
    // * :ref:`cluster_header ` indicates which
    //   header in the request contains the target cluster.
    message DestinationCluster {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.api.v2.route.RateLimit.Action.DestinationCluster";
    }

    // The following descriptor entry is appended when a header contains a key that matches the
    // *header_name*:
    //
    // .. code-block:: cpp
    //
    //   ("", "")
    message RequestHeaders {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.api.v2.route.RateLimit.Action.RequestHeaders";

      // The header name to be queried from the request headers. The header’s
      // value is used to populate the value of the descriptor entry for the
      // descriptor_key.
      string header_name = 1
          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

      // The key to use in the descriptor entry.
      string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];

      // If set to true, Envoy skips the descriptor while calling rate limiting service
      // when header is not present in the request. By default it skips calling the
      // rate limiting service if this header is not present in the request.
      bool skip_if_absent = 3;
    }

    // The following descriptor entry is appended to the descriptor and is populated using the
    // trusted address from :ref:`x-forwarded-for `:
    //
    // .. code-block:: cpp
    //
    //   ("remote_address", "")
    message RemoteAddress {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.api.v2.route.RateLimit.Action.RemoteAddress";
    }

    // The following descriptor entry is appended to the descriptor:
    //
    // .. code-block:: cpp
    //
    //   ("generic_key", "")
    message GenericKey {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.api.v2.route.RateLimit.Action.GenericKey";

      // The value to use in the descriptor entry.
      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];

      // An optional key to use in the descriptor entry. If not set it defaults
      // to 'generic_key' as the descriptor key.
      string descriptor_key = 2;
    }

    // The following descriptor entry is appended to the descriptor:
    //
    // .. code-block:: cpp
    //
    //   ("header_match", "")
    message HeaderValueMatch {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.api.v2.route.RateLimit.Action.HeaderValueMatch";

      // The value to use in the descriptor entry.
      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];

      // If set to true, the action will append a descriptor entry when the
      // request matches the headers. If set to false, the action will append a
      // descriptor entry when the request does not match the headers. The
      // default value is true.
      google.protobuf.BoolValue expect_match = 2;

      // Specifies a set of headers that the rate limit action should match
      // on. The action will check the request’s headers against all the
      // specified headers in the config. A match will happen if all the
      // headers in the config are present in the request with the same values
      // (or based on presence if the value field is not in the config).
      repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];
    }

    // The following descriptor entry is appended when the dynamic metadata contains a key value:
    //
    // .. code-block:: cpp
    //
    //   ("", "")
    message DynamicMetaData {
      // The key to use in the descriptor entry.
      string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];

      // Metadata struct that defines the key and path to retrieve the string value. A match will
      // only happen if the value in the dynamic metadata is of type string.
      type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];

      // An optional value to use if *metadata_key* is empty. If not set and
      // no value is present under the metadata_key then no descriptor is generated.
      string default_value = 3;
    }

    oneof action_specifier {
      option (validate.required) = true;

      // Rate limit on source cluster.
      SourceCluster source_cluster = 1;

      // Rate limit on destination cluster.
      DestinationCluster destination_cluster = 2;

      // Rate limit on request headers.
      RequestHeaders request_headers = 3;

      // Rate limit on remote address.
      RemoteAddress remote_address = 4;

      // Rate limit on a generic key.
      GenericKey generic_key = 5;

      // Rate limit on the existence of request headers.
      HeaderValueMatch header_value_match = 6;

      // Rate limit on dynamic metadata.
      DynamicMetaData dynamic_metadata = 7;
    }
  }

  message Override {
    // Fetches the override from the dynamic metadata.
    message DynamicMetadata {
      // Metadata struct that defines the key and path to retrieve the struct value.
      // The value must be a struct containing an integer "requests_per_unit" property
      // and a "unit" property with a value parseable to :ref:`RateLimitUnit
      // enum `
      type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}];
    }

    oneof override_specifier {
      option (validate.required) = true;

      // Limit override from dynamic metadata.
      DynamicMetadata dynamic_metadata = 1;
    }
  }

  // Refers to the stage set in the filter. The rate limit configuration only
  // applies to filters with the same stage number. The default stage number is
  // 0.
  //
  // .. note::
  //
  //   The filter supports a range of 0 - 10 inclusively for stage numbers.
  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];

  // The key to be set in runtime to disable this rate limit configuration.
  string disable_key = 2;

  // A list of actions that are to be applied for this rate limit configuration.
  // Order matters as the actions are processed sequentially and the descriptor
  // is composed by appending descriptor entries in that sequence. If an action
  // cannot append a descriptor entry, no descriptor is generated for the
  // configuration. See :ref:`composing actions
  // ` for additional documentation.
  repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];

  // An optional limit override to be appended to the descriptor produced by this
  // rate limit configuration. If the override value is invalid or cannot be resolved
  // from metadata, no override is provided. See :ref:`rate limit override
  // ` for more information.
  Override limit = 4;
}

// .. attention::
//
//   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host*
//   header. Thus, if attempting to match on *Host*, match on *:authority* instead.
//
// .. attention::
//
//   To route on HTTP method, use the special HTTP/2 *:method* header. This works for both
//   HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g.,
//
//   .. code-block:: json
//
//     {
//       "name": ":method",
//       "exact_match": "POST"
//     }
//
// .. attention::
//   In the absence of any header match specifier, match will default to :ref:`present_match
//   `. i.e, a request that has the :ref:`name
//   ` header will match, regardless of the header's
//   value.
//
// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]
// [#next-free-field: 13]
message HeaderMatcher {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.route.HeaderMatcher";

  reserved 2, 3, 5;

  reserved "regex_match";

  // Specifies the name of the header in the request.
  string name = 1
      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

  // Specifies how the header match will be performed to route the request.
  oneof header_match_specifier {
    // If specified, header match will be performed based on the value of the header.
    string exact_match = 4;

    // If specified, this regex string is a regular expression rule which implies the entire request
    // header value must match the regex. The rule will not match if only a subsequence of the
    // request header value matches the regex.
    type.matcher.v3.RegexMatcher safe_regex_match = 11;

    // If specified, header match will be performed based on range.
    // The rule will match if the request header value is within this range.
    // The entire request header value must represent an integer in base 10 notation: consisting of
    // an optional plus or minus sign followed by a sequence of digits. The rule will not match if
    // the header value does not represent an integer. Match will fail for empty values, floating
    // point numbers or if only a subsequence of the header value is an integer.
    //
    // Examples:
    //
    // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9,
    //   "-1somestring"
    type.v3.Int64Range range_match = 6;

    // If specified, header match will be performed based on whether the header is in the
    // request.
    bool present_match = 7;

    // If specified, header match will be performed based on the prefix of the header value.
    // Note: empty prefix is not allowed, please use present_match instead.
    //
    // Examples:
    //
    // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.
    string prefix_match = 9 [(validate.rules).string = {min_len: 1}];

    // If specified, header match will be performed based on the suffix of the header value.
    // Note: empty suffix is not allowed, please use present_match instead.
    //
    // Examples:
    //
    // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.
    string suffix_match = 10 [(validate.rules).string = {min_len: 1}];

    // If specified, header match will be performed based on whether the header value contains
    // the given value or not.
    // Note: empty contains match is not allowed, please use present_match instead.
    //
    // Examples:
    //
    // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*.
    string contains_match = 12 [(validate.rules).string = {min_len: 1}];
  }

  // If specified, the match result will be inverted before checking. Defaults to false.
  //
  // Examples:
  //
  // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted.
  // * The range [-10,0) will match the value -1, so it will not match when inverted.
  bool invert_match = 8;
}

// Query parameter matching treats the query string of a request's :path header
// as an ampersand-separated list of keys and/or key=value elements.
// [#next-free-field: 7]
message QueryParameterMatcher {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.route.QueryParameterMatcher";

  reserved 3, 4;

  reserved "value", "regex";

  // Specifies the name of a key that must be present in the requested
  // *path*'s query string.
  string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}];

  oneof query_parameter_match_specifier {
    // Specifies whether a query parameter value should match against a string.
    type.matcher.v3.StringMatcher string_match = 5 [(validate.rules).message = {required: true}];

    // Specifies whether a query parameter should be present.
    bool present_match = 6;
  }
}

// HTTP Internal Redirect :ref:`architecture overview `.
message InternalRedirectPolicy {
  // An internal redirect is not handled, unless the number of previous internal redirects that a
  // downstream request has encountered is lower than this value.
  // In the case where a downstream request is bounced among multiple routes by internal redirect,
  // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy
  // `
  // will pass the redirect back to downstream.
  //
  // If not specified, at most one redirect will be followed.
  google.protobuf.UInt32Value max_internal_redirects = 1;

  // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified,
  // only 302 will be treated as internal redirect.
  // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored.
  repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}];

  // Specifies a list of predicates that are queried when an upstream response is deemed
  // to trigger an internal redirect by all other criteria. Any predicate in the list can reject
  // the redirect, causing the response to be proxied to downstream.
  repeated core.v3.TypedExtensionConfig predicates = 3;

  // Allow internal redirect to follow a target URI with a different scheme than the value of
  // x-forwarded-proto. The default is false.
  bool allow_cross_scheme_redirect = 4;
}

================================================
FILE: api/envoy/config/route/v3/scoped_route.proto
================================================
syntax = "proto3";

package envoy.config.route.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.route.v3";
option java_outer_classname = "ScopedRouteProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: HTTP scoped routing configuration]
// * Routing :ref:`architecture overview `

// Specifies a routing scope, which associates a
// :ref:`Key` to a
// :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` (identified by its resource name).
//
// The HTTP connection manager builds up a table consisting of these Key to
// RouteConfiguration mappings, and looks up the RouteConfiguration to use per
// request according to the algorithm specified in the
// :ref:`scope_key_builder`
// assigned to the HttpConnectionManager.
//
// For example, with the following configurations (in YAML):
//
// HttpConnectionManager config:
//
// .. code::
//
//   ...
// scoped_routes: // name: foo-scoped-routes // scope_key_builder: // fragments: // - header_value_extractor: // name: X-Route-Selector // element_separator: , // element: // separator: = // key: vip // // ScopedRouteConfiguration resources (specified statically via // :ref:`scoped_route_configurations_list` // or obtained dynamically via SRDS): // // .. code:: // // (1) // name: route-scope1 // route_configuration_name: route-config1 // key: // fragments: // - string_key: 172.10.10.20 // // (2) // name: route-scope2 // route_configuration_name: route-config2 // key: // fragments: // - string_key: 172.20.20.30 // // A request from a client such as: // // .. code:: // // GET / HTTP/1.1 // Host: foo.com // X-Route-Selector: vip=172.10.10.20 // // would result in the routing table defined by the `route-config1` // RouteConfiguration being assigned to the HTTP request/stream. // message ScopedRouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ScopedRouteConfiguration"; // Specifies a key which is matched against the output of the // :ref:`scope_key_builder` // specified in the HttpConnectionManager. The matching is done per HTTP // request and is dependent on the order of the fragments contained in the // Key. message Key { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ScopedRouteConfiguration.Key"; message Fragment { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.ScopedRouteConfiguration.Key.Fragment"; oneof type { option (validate.required) = true; // A string to match against. string string_key = 1; } } // The ordered set of fragments to match against. The order must match the // fragments in the corresponding // :ref:`scope_key_builder`. repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } // Whether the RouteConfiguration should be loaded on demand. bool on_demand = 4; // The name assigned to the routing scope. 
string name = 1 [(validate.rules).string = {min_len: 1}]; // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v3.DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_config.route.v3.RouteConfiguration` associated // with this scope. string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; // The key to match against. Key key = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/route/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/route/v4alpha/route.proto ================================================ syntax = "proto3"; package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: HTTP route configuration] // * Routing :ref:`architecture overview ` // * HTTP :ref:`router filter ` // [#next-free-field: 11] message RouteConfiguration { option 
(udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteConfiguration"; // The name of the route configuration. For example, it might match // :ref:`route_config_name // ` in // :ref:`envoy_api_msg_extensions.filters.network.http_connection_manager.v4alpha.Rds`. string name = 1; // An array of virtual hosts that make up the route table. repeated VirtualHost virtual_hosts = 2; // An array of virtual hosts will be dynamically loaded via the VHDS API. // Both *virtual_hosts* and *vhds* fields will be used when present. *virtual_hosts* can be used // for a base routing table or for infrequently changing virtual hosts. *vhds* is used for // on-demand discovery of virtual hosts. The contents of these two fields will be merged to // generate a routing table for a given RouteConfiguration, with *vhds* derived configuration // taking precedence. Vhds vhds = 9; // Optionally specifies a list of HTTP headers that the connection manager // will consider to be internal only. If they are found on external requests they will be cleaned // prior to filter invocation. See :ref:`config_http_conn_man_headers_x-envoy-internal` for more // information. repeated string internal_only_headers = 3 [ (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} ]; // Specifies a list of HTTP headers that should be added to each response that // the connection manager encodes. Headers specified at this level are applied // after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.v4alpha.HeaderValueOption response_headers_to_add = 4 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // that the connection manager encodes. 
repeated string response_headers_to_remove = 5 [ (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} ]; // Specifies a list of HTTP headers that should be added to each request // routed by the HTTP connection manager. Headers specified at this level are // applied after headers from any enclosed :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` or // :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.v4alpha.HeaderValueOption request_headers_to_add = 6 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // routed by the HTTP connection manager. repeated string request_headers_to_remove = 8 [ (validate.rules).repeated = {items {string {well_known_regex: HTTP_HEADER_NAME strict: false}}} ]; // By default, headers that should be added/removed are evaluated from most to least specific: // // * route level // * virtual host level // * connection manager level // // To allow setting overrides at the route or virtual host level, this order can be reversed // by setting this option to true. Defaults to false. // // [#next-major-version: In the v3 API, this will default to true.] bool most_specific_header_mutations_wins = 10; // An optional boolean that specifies whether the clusters that the route // table refers to will be validated by the cluster manager. If set to true // and a route refers to a non-existent cluster, the route table will not // load. If set to false and a route refers to a non-existent cluster, the // route table will load and the router filter will return a 404 if the route // is selected at runtime. This setting defaults to true if the route table // is statically defined via the :ref:`route_config // ` // option. 
This setting defaults to false if the route table is loaded dynamically via the // :ref:`rds // ` // option. Users may wish to override the default behavior in certain cases (for example when // using CDS with a static route table). google.protobuf.BoolValue validate_clusters = 7; } message Vhds { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Vhds"; // Configuration source specifier for VHDS. core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/route/v4alpha/route_components.proto ================================================ syntax = "proto3"; package envoy.config.route.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/extension.proto"; import "envoy/config/core/v4alpha/proxy_protocol.proto"; import "envoy/type/matcher/v4alpha/regex.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/metadata/v3/metadata.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "envoy/type/v3/range.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.route.v4alpha"; option java_outer_classname = "RouteComponentsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: HTTP route components] // * Routing :ref:`architecture overview ` // * HTTP :ref:`router filter ` // The top level element in the routing configuration is a virtual host.
Each virtual host has // a logical name as well as a set of domains that get routed to it based on the incoming request's // host header. This allows a single listener to service multiple top level domain path trees. Once // a virtual host is selected based on the domain, the routes are processed in order to see which // upstream cluster to route to or whether to perform a redirect. // [#next-free-field: 21] message VirtualHost { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.VirtualHost"; enum TlsRequirementType { // No TLS requirement for the virtual host. NONE = 0; // External requests must use TLS. If a request is external and it is not // using TLS, a 301 redirect will be sent telling the client to use HTTPS. EXTERNAL_ONLY = 1; // All requests must use TLS. If a request is not using TLS, a 301 redirect // will be sent telling the client to use HTTPS. ALL = 2; } reserved 9, 12; reserved "per_filter_config"; // The logical name of the virtual host. This is used when emitting certain // statistics but is not relevant for routing. string name = 1 [(validate.rules).string = {min_len: 1}]; // A list of domains (host/authority header) that will be matched to this // virtual host. Wildcard hosts are supported in the suffix or prefix form. // // Domain search order: // 1. Exact domain names: ``www.foo.com``. // 2. Suffix domain wildcards: ``*.foo.com`` or ``*-bar.foo.com``. // 3. Prefix domain wildcards: ``foo.*`` or ``foo-*``. // 4. Special wildcard ``*`` matching any domain. // // .. note:: // // The wildcard will not match the empty string. // e.g. ``*-bar.foo.com`` will match ``baz-bar.foo.com`` but not ``-bar.foo.com``. // The longest wildcards match first. // Only a single virtual host in the entire route configuration can match on ``*``. A domain // must be unique across all virtual hosts or the config will fail to load. // // Domains cannot contain control characters. 
This is validated by the well_known_regex HTTP_HEADER_VALUE. repeated string domains = 2 [(validate.rules).repeated = { min_items: 1 items {string {well_known_regex: HTTP_HEADER_VALUE strict: false}} }]; // The list of routes that will be matched, in order, for incoming requests. // The first route that matches will be used. repeated Route routes = 3; // Specifies the type of TLS enforcement the virtual host expects. If this option is not // specified, there is no TLS requirement for the virtual host. TlsRequirementType require_tls = 4 [(validate.rules).enum = {defined_only: true}]; // A list of virtual clusters defined for this virtual host. Virtual clusters // are used for additional statistics gathering. repeated VirtualCluster virtual_clusters = 5; // Specifies a set of rate limit configurations that will be applied to the // virtual host. repeated RateLimit rate_limits = 6; // Specifies a list of HTTP headers that should be added to each request // handled by this virtual host. Headers specified at this level are applied // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including // details on header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.v4alpha.HeaderValueOption request_headers_to_add = 7 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // handled by this virtual host. repeated string request_headers_to_remove = 13 [(validate.rules).repeated = { items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a list of HTTP headers that should be added to each response // handled by this virtual host. 
Headers specified at this level are applied // after headers from enclosed :ref:`envoy_api_msg_config.route.v4alpha.Route` and before headers from the // enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including // details on header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // handled by this virtual host. repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Indicates that the virtual host has a CORS policy. CorsPolicy cors = 8; // The per_filter_config field can be used to provide virtual host-specific // configurations for filters. The key should match the filter name, such as // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` // for if and how it is utilized. map typed_per_filter_config = 15; // Decides whether the :ref:`x-envoy-attempt-count // ` header should be included // in the upstream request. Setting this option will cause it to override any existing header // value, so in the case of two Envoys on the request path with this option enabled, the upstream // will see the attempt count as perceived by the second Envoy. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers // ` flag. // // [#next-major-version: rename to include_attempt_count_in_request.] bool include_request_attempt_count = 14; // Decides whether the :ref:`x-envoy-attempt-count // ` header should be included // in the downstream response. 
Setting this option will cause the router to override any existing header // value, so in the case of two Envoys on the request path with this option enabled, the downstream // will see the attempt count as perceived by the Envoy closest upstream from itself. Defaults to false. // This header is unaffected by the // :ref:`suppress_envoy_headers // ` flag. bool include_attempt_count_in_response = 19; // Indicates the retry policy for all routes in this virtual host. Note that setting a // route level entry will take precedence over this config and it'll be treated // independently (e.g.: values are not inherited). RetryPolicy retry_policy = 16; // [#not-implemented-hide:] // Specifies the configuration for retry policy extension. Note that setting a route level entry // will take precedence over this config and it'll be treated independently (e.g.: values are not // inherited). :ref:`Retry policy ` should not be // set if this field is used. google.protobuf.Any retry_policy_typed_config = 20; // Indicates the hedge policy for all routes in this virtual host. Note that setting a // route level entry will take precedence over this config and it'll be treated // independently (e.g.: values are not inherited). HedgePolicy hedge_policy = 17; // The maximum bytes which will be buffered for retries and shadowing. // If set and a route-specific limit is not set, the bytes actually buffered will be the minimum // value of this and the listener per_connection_buffer_limit_bytes. google.protobuf.UInt32Value per_request_buffer_limit_bytes = 18; } // A filter-defined action type. message FilterAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.FilterAction"; google.protobuf.Any action = 1; } // A route is both a specification of how to match a request as well as an indication of what to do // next (e.g., redirect, forward, rewrite, etc.). // // .. attention:: // // Envoy supports routing on HTTP method via :ref:`header matching // `. 
// [#next-free-field: 18] message Route { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Route"; reserved 6, 8; reserved "per_filter_config"; // Name for the route. string name = 14; // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; oneof action { option (validate.required) = true; // Route request to some upstream cluster. RouteAction route = 2; // Return a redirect. RedirectAction redirect = 3; // Return an arbitrary HTTP response directly, without proxying. DirectResponseAction direct_response = 7; // [#not-implemented-hide:] // If true, a filter will define the action (e.g., it could dynamically generate the // RouteAction). // [#comment: TODO(samflattery): Remove cleanup in route_fuzz_test.cc when // implemented] FilterAction filter_action = 17; } // The Metadata field can be used to provide additional information // about the route. It can be used for configuration, stats, and logging. // The metadata should go under the filter namespace that will need it. // For instance, if the metadata is intended for the Router filter, // the filter name should be specified as *envoy.filters.http.router*. core.v4alpha.Metadata metadata = 4; // Decorator for the matched route. Decorator decorator = 5; // The typed_per_filter_config field can be used to provide route-specific // configurations for filters. The key should match the filter name, such as // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter // specific; see the :ref:`HTTP filter documentation ` for // if and how it is utilized. map typed_per_filter_config = 13; // Specifies a set of headers that will be added to requests matching this // route. Headers specified at this level are applied before headers from the // enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. 
For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.v4alpha.HeaderValueOption request_headers_to_add = 9 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request // matching this route. repeated string request_headers_to_remove = 12 [(validate.rules).repeated = { items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a set of headers that will be added to responses to requests // matching this route. Headers specified at this level are applied before // headers from the enclosing :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost` and // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including // details on header value syntax, see the documentation on // :ref:`custom request headers `. repeated core.v4alpha.HeaderValueOption response_headers_to_add = 10 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each response // to requests matching this route. repeated string response_headers_to_remove = 11 [(validate.rules).repeated = { items {string {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Presence of the object defines whether the connection manager's tracing configuration // is overridden by this route specific instance. Tracing tracing = 15; // The maximum bytes which will be buffered for retries and shadowing. // If set, the bytes actually buffered will be the minimum value of this and the // listener per_connection_buffer_limit_bytes. 
google.protobuf.UInt32Value per_request_buffer_limit_bytes = 16; } // Compared to the :ref:`cluster ` field that specifies a // single upstream cluster as the target of a request, the :ref:`weighted_clusters // ` option allows for specification of // multiple upstream clusters along with weights that indicate the percentage of // traffic to be forwarded to each cluster. The router selects an upstream cluster based on the // weights. message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.WeightedCluster"; // [#next-free-field: 11] message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.WeightedCluster.ClusterWeight"; reserved 7, 8; reserved "per_filter_config"; // Name of the upstream cluster. The cluster must exist in the // :ref:`cluster manager configuration `. string name = 1 [(validate.rules).string = {min_len: 1}]; // An integer between 0 and :ref:`total_weight // `. When a request matches the route, // the choice of an upstream cluster is determined by its weight. The sum of weights across all // entries in the clusters array must add up to the total_weight, which defaults to 100. google.protobuf.UInt32Value weight = 2; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field will be considered for // load balancing. Note that this will be merged with what's provided in // :ref:`RouteAction.metadata_match `, with // values here taking precedence. The filter name should be specified as *envoy.lb*. core.v4alpha.Metadata metadata_match = 3; // Specifies a list of headers to be added to requests when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. 
// Headers specified at this level are applied before headers from the enclosing // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.v4alpha.HeaderValueOption request_headers_to_add = 4 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of HTTP headers that should be removed from each request when // this cluster is selected through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. repeated string request_headers_to_remove = 9 [(validate.rules).repeated = { items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // Specifies a list of headers to be added to responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. // Headers specified at this level are applied before headers from the enclosing // :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`, and // :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration`. For more information, including details on // header value syntax, see the documentation on :ref:`custom request headers // `. repeated core.v4alpha.HeaderValueOption response_headers_to_add = 5 [(validate.rules).repeated = {max_items: 1000}]; // Specifies a list of headers to be removed from responses when this cluster is selected // through the enclosing :ref:`envoy_api_msg_config.route.v4alpha.RouteAction`. repeated string response_headers_to_remove = 6 [(validate.rules).repeated = { items {string {well_known_regex: HTTP_HEADER_NAME strict: false}} }]; // The per_filter_config field can be used to provide weighted cluster-specific // configurations for filters. 
// The key should match the filter name, such as
    // *envoy.filters.http.buffer* for the HTTP buffer filter. Use of this field is filter
    // specific; see the :ref:`HTTP filter documentation `
    // for if and how it is utilized.
    // NOTE(review): the map type arguments were lost in extraction; restored as
    // map<string, google.protobuf.Any> to match the typed per-filter-config convention.
    map<string, google.protobuf.Any> typed_per_filter_config = 10;
  }

  // Specifies one or more upstream clusters associated with the route.
  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];

  // Specifies the total weight across all clusters. The sum of all cluster weights must equal this
  // value, which must be greater than 0. Defaults to 100.
  google.protobuf.UInt32Value total_weight = 3 [(validate.rules).uint32 = {gte: 1}];

  // Specifies the runtime key prefix that should be used to construct the
  // runtime keys associated with each cluster. When the *runtime_key_prefix* is
  // specified, the router will look for weights associated with each upstream
  // cluster under the key *runtime_key_prefix* + "." + *cluster[i].name* where
  // *cluster[i]* denotes an entry in the clusters array field. If the runtime
  // key for the cluster does not exist, the value specified in the
  // configuration file will be used as the default weight. See the :ref:`runtime documentation
  // ` for how key names map to the underlying implementation.
  string runtime_key_prefix = 2;
}

// [#next-free-field: 13]
message RouteMatch {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteMatch";

  message GrpcRouteMatchOptions {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RouteMatch.GrpcRouteMatchOptions";
  }

  message TlsContextMatchOptions {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RouteMatch.TlsContextMatchOptions";

    // If specified, the route will match against whether or not a certificate is presented.
    // If not specified, certificate presentation status (true or false) will not be considered
    // when route matching.
    google.protobuf.BoolValue presented = 1;

    // If specified, the route will match against whether or not a certificate is validated.
    // If not specified, certificate validation status (true or false) will not be considered
    // when route matching.
    google.protobuf.BoolValue validated = 2;
  }

  // An extensible message for matching CONNECT requests.
  message ConnectMatcher {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RouteMatch.ConnectMatcher";
  }

  reserved 5, 3;

  reserved "regex";

  oneof path_specifier {
    option (validate.required) = true;

    // If specified, the route is a prefix rule meaning that the prefix must
    // match the beginning of the *:path* header.
    string prefix = 1;

    // If specified, the route is an exact path rule meaning that the path must
    // exactly match the *:path* header once the query string is removed.
    string path = 2;

    // If specified, the route is a regular expression rule meaning that the
    // regex must match the *:path* header once the query string is removed. The entire path
    // (without the query string) must match the regex. The rule will not match if only a
    // subsequence of the *:path* header matches the regex.
    //
    // [#next-major-version: In the v3 API we should redo how path specification works such
    // that we utilize StringMatcher, and additionally have consistent options around whether we
    // strip query strings, do a case sensitive match, etc. In the interim it will be too disruptive
    // to deprecate the existing options. We should even consider whether we want to do away with
    // path_specifier entirely and just rely on a set of header matchers which can already match
    // on :path, etc. The issue with that is it is unclear how to generically deal with query string
    // stripping. This needs more thought.]
    type.matcher.v4alpha.RegexMatcher safe_regex = 10 [(validate.rules).message = {required: true}];

    // If this is used as the matcher, the matcher will only match CONNECT requests.
    // Note that this will not match HTTP/2 upgrade-style CONNECT requests
    // (WebSocket and the like) as they are normalized in Envoy as HTTP/1.1 style
    // upgrades.
    // This is the only way to match CONNECT requests for HTTP/1.1. For HTTP/2,
    // where Extended CONNECT requests may have a path, the path matchers will work if
    // there is a path present.
    // Note that CONNECT support is currently considered alpha in Envoy.
    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.]
    ConnectMatcher connect_matcher = 12;
  }

  // Indicates that prefix/path matching should be case sensitive. The default
  // is true.
  google.protobuf.BoolValue case_sensitive = 4;

  // Indicates that the route should additionally match on a runtime key. Every time the route
  // is considered for a match, it must also fall under the percentage of matches indicated by
  // this field. For some fraction N/D, a random number in the range [0,D) is selected. If the
  // number is <= the value of the numerator N, or if the key is not present, the default
  // value, the router continues to evaluate the remaining match criteria. A runtime_fraction
  // route configuration can be used to roll out route changes in a gradual manner without full
  // code/config deploys. Refer to the :ref:`traffic shifting
  // ` docs for additional documentation.
  //
  // .. note::
  //
  //    Parsing this field is implemented such that the runtime key's data may be represented
  //    as a FractionalPercent proto represented as JSON/YAML and may also be represented as an
  //    integer with the assumption that the value is an integral percentage out of 100. For
  //    instance, a runtime key lookup returning the value "42" would parse as a FractionalPercent
  //    whose numerator is 42 and denominator is HUNDRED. This preserves legacy semantics.
  core.v4alpha.RuntimeFractionalPercent runtime_fraction = 9;

  // Specifies a set of headers that the route should match on. The router will
  // check the request's headers against all the specified headers in the route
  // config. A match will happen if all the headers in the route are present in
  // the request with the same values (or based on presence if the value field
  // is not in the config).
  repeated HeaderMatcher headers = 6;

  // Specifies a set of URL query parameters on which the route should
  // match. The router will check the query string from the *path* header
  // against all the specified query parameters. If the number of specified
  // query parameters is nonzero, they all must match the *path* header's
  // query string for a match to occur.
  repeated QueryParameterMatcher query_parameters = 7;

  // If specified, only gRPC requests will be matched. The router will check
  // that the content-type header has an application/grpc or one of the various
  // application/grpc+ values.
  GrpcRouteMatchOptions grpc = 8;

  // If specified, the client tls context will be matched against the defined
  // match options.
  //
  // [#next-major-version: unify with RBAC]
  TlsContextMatchOptions tls_context = 11;
}

// [#next-free-field: 12]
message CorsPolicy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.CorsPolicy";

  reserved 1, 8, 7;

  reserved "allow_origin", "allow_origin_regex", "enabled";

  // Specifies string patterns that match allowed origins. An origin is allowed if any of the
  // string matchers match.
  repeated type.matcher.v4alpha.StringMatcher allow_origin_string_match = 11;

  // Specifies the content for the *access-control-allow-methods* header.
  string allow_methods = 2;

  // Specifies the content for the *access-control-allow-headers* header.
  string allow_headers = 3;

  // Specifies the content for the *access-control-expose-headers* header.
  string expose_headers = 4;

  // Specifies the content for the *access-control-max-age* header.
  string max_age = 5;

  // Specifies whether the resource allows credentials.
google.protobuf.BoolValue allow_credentials = 6;

  oneof enabled_specifier {
    // Specifies the % of requests for which the CORS filter is enabled.
    //
    // If neither ``enabled``, ``filter_enabled``, nor ``shadow_enabled`` are specified, the CORS
    // filter will be enabled for 100% of the requests.
    //
    // If :ref:`runtime_key ` is
    // specified, Envoy will lookup the runtime key to get the percentage of requests to filter.
    core.v4alpha.RuntimeFractionalPercent filter_enabled = 9;
  }

  // Specifies the % of requests for which the CORS policies will be evaluated and tracked, but not
  // enforced.
  //
  // This field is intended to be used when ``filter_enabled`` and ``enabled`` are off. One of those
  // fields has to explicitly disable the filter in order for this setting to take effect.
  //
  // If :ref:`runtime_key ` is specified,
  // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate
  // and track the request's *Origin* to determine if it's valid but will not enforce any policies.
  core.v4alpha.RuntimeFractionalPercent shadow_enabled = 10;
}

// [#next-free-field: 37]
message RouteAction {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RouteAction";

  enum ClusterNotFoundResponseCode {
    // HTTP status code - 503 Service Unavailable.
    SERVICE_UNAVAILABLE = 0;

    // HTTP status code - 404 Not Found.
    NOT_FOUND = 1;
  }

  // The router is capable of shadowing traffic from one cluster to another. The current
  // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to
  // respond before returning the response from the primary cluster. All normal statistics are
  // collected for the shadow cluster making this feature useful for testing.
  //
  // During shadowing, the host/authority header is altered such that *-shadow* is appended. This is
  // useful for logging. For example, *cluster1* becomes *cluster1-shadow*.
  //
  // .. note::
  //
  //   Shadowing will not be triggered if the primary cluster does not exist.
  message RequestMirrorPolicy {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RouteAction.RequestMirrorPolicy";

    reserved 2;

    reserved "runtime_key";

    // Specifies the cluster that requests will be mirrored to. The cluster must
    // exist in the cluster manager configuration.
    string cluster = 1 [(validate.rules).string = {min_len: 1}];

    // If not specified, all requests to the target cluster will be mirrored.
    //
    // If specified, this field takes precedence over the `runtime_key` field and requests must also
    // fall under the percentage of matches indicated by this field.
    //
    // For some fraction N/D, a random number in the range [0,D) is selected. If the
    // number is <= the value of the numerator N, or if the key is not present, the default
    // value, the request will be mirrored.
    core.v4alpha.RuntimeFractionalPercent runtime_fraction = 3;

    // Determines if the trace span should be sampled. Defaults to true.
    google.protobuf.BoolValue trace_sampled = 4;
  }

  // Specifies the route's hashing policy if the upstream cluster uses a hashing :ref:`load balancer
  // `.
  // [#next-free-field: 7]
  message HashPolicy {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RouteAction.HashPolicy";

    message Header {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RouteAction.HashPolicy.Header";

      // The name of the request header that will be used to obtain the hash
      // key. If the request header is not present, no hash will be produced.
      string header_name = 1
          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

      // If specified, the request header value will be rewritten and used
      // to produce the hash key.
      type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 2;
    }

    // Envoy supports two types of cookie affinity:
    //
    // 1. Passive. Envoy takes a cookie that's present in the cookies header and
    //    hashes on its value.
    //
    // 2. Generated. Envoy generates and sets a cookie with an expiration (TTL)
    //    on the first request from the client in its response to the client,
    //    based on the endpoint the request gets sent to. The client then
    //    presents this on the next and all subsequent requests. The hash of
    //    this is sufficient to ensure these requests get sent to the same
    //    endpoint. The cookie is generated by hashing the source and
    //    destination ports and addresses so that multiple independent HTTP2
    //    streams on the same connection will independently receive the same
    //    cookie, even if they arrive at the Envoy simultaneously.
    message Cookie {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RouteAction.HashPolicy.Cookie";

      // The name of the cookie that will be used to obtain the hash key. If the
      // cookie is not present and ttl below is not set, no hash will be
      // produced.
      string name = 1 [(validate.rules).string = {min_len: 1}];

      // If specified, a cookie with the TTL will be generated if the cookie is
      // not present. If the TTL is present and zero, the generated cookie will
      // be a session cookie.
      google.protobuf.Duration ttl = 2;

      // The name of the path for the cookie. If no path is specified here, no path
      // will be set for the cookie.
      string path = 3;
    }

    message ConnectionProperties {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RouteAction.HashPolicy.ConnectionProperties";

      // Hash on source IP address.
      bool source_ip = 1;
    }

    message QueryParameter {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RouteAction.HashPolicy.QueryParameter";

      // The name of the URL query parameter that will be used to obtain the hash
      // key. If the parameter is not present, no hash will be produced. Query
      // parameter names are case-sensitive.
      string name = 1 [(validate.rules).string = {min_len: 1}];
    }

    message FilterState {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RouteAction.HashPolicy.FilterState";

      // The name of the Object in the per-request filterState, which is an
      // Envoy::Http::Hashable object. If there is no data associated with the key,
      // or the stored object is not Envoy::Http::Hashable, no hash will be produced.
      string key = 1 [(validate.rules).string = {min_len: 1}];
    }

    oneof policy_specifier {
      option (validate.required) = true;

      // Header hash policy.
      Header header = 1;

      // Cookie hash policy.
      Cookie cookie = 2;

      // Connection properties hash policy.
      ConnectionProperties connection_properties = 3;

      // Query parameter hash policy.
      QueryParameter query_parameter = 5;

      // Filter state hash policy.
      FilterState filter_state = 6;
    }

    // The flag that short-circuits the hash computing. This field provides a
    // 'fallback' style of configuration: "if a terminal policy doesn't work,
    // fallback to rest of the policy list", it saves time when the terminal
    // policy works.
    //
    // If true, and there is already a hash computed, ignore rest of the
    // list of hash policies.
    // For example, if the following hash methods are configured:
    //
    //  ========= ========
    //  specifier terminal
    //  ========= ========
    //  Header A  true
    //  Header B  false
    //  Header C  false
    //  ========= ========
    //
    // The generateHash process ends if policy "header A" generates a hash, as
    // it's a terminal policy.
    bool terminal = 4;
  }

  // Allows enabling and disabling upgrades on a per-route basis.
  // This overrides any enabled/disabled upgrade filter chain specified in the
  // HttpConnectionManager
  // :ref:`upgrade_configs
  // `
  // but does not affect any custom filter chain specified there.
  message UpgradeConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RouteAction.UpgradeConfig";

    // Configuration for sending data upstream as a raw data payload. This is used for
    // CONNECT requests, when forwarding CONNECT payload as raw TCP.
    message ConnectConfig {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RouteAction.UpgradeConfig.ConnectConfig";

      // If present, the proxy protocol header will be prepended to the CONNECT payload sent upstream.
      core.v4alpha.ProxyProtocolConfig proxy_protocol_config = 1;
    }

    // The case-insensitive name of this upgrade, e.g. "websocket".
    // For each upgrade type present in upgrade_configs, requests with
    // Upgrade: [upgrade_type] will be proxied upstream.
    string upgrade_type = 1
        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

    // Determines if upgrades are available on this route. Defaults to true.
    google.protobuf.BoolValue enabled = 2;

    // Configuration for sending data upstream as a raw data payload. This is used for
    // CONNECT requests, when forwarding CONNECT payload as raw TCP.
    // Note that CONNECT support is currently considered alpha in Envoy.
    // [#comment:TODO(htuch): Replace the above comment with an alpha tag.]
    ConnectConfig connect_config = 3;
  }

  message MaxStreamDuration {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RouteAction.MaxStreamDuration";

    // Specifies the maximum duration allowed for streams on the route. If not specified, the value
    // from the :ref:`max_stream_duration
    // ` field in
    // :ref:`HttpConnectionManager.common_http_protocol_options
    // `
    // is used. If this field is set explicitly to zero, any
    // HttpConnectionManager max_stream_duration timeout will be disabled for
    // this route.
    google.protobuf.Duration max_stream_duration = 1;

    // If present, and the request contains a `grpc-timeout header
    // `_, use that value as the
    // *max_stream_duration*, but limit the applied timeout to the maximum value specified here.
    // If set to 0, the `grpc-timeout` header is used without modification.
    google.protobuf.Duration grpc_timeout_header_max = 2;

    // If present, Envoy will adjust the timeout provided by the `grpc-timeout` header by
    // subtracting the provided duration from the header. This is useful for allowing Envoy to set
    // its global timeout to be less than that of the deadline imposed by the calling client, which
    // makes it more likely that Envoy will handle the timeout instead of having the call canceled
    // by the client. If, after applying the offset, the resulting timeout is zero or negative,
    // the stream will timeout immediately.
    google.protobuf.Duration grpc_timeout_header_offset = 3;
  }

  reserved 12, 18, 19, 16, 22, 21, 10, 14, 23, 28, 26, 31;

  reserved "request_mirror_policy", "include_vh_rate_limits", "max_grpc_timeout",
      "grpc_timeout_offset", "internal_redirect_action", "max_internal_redirects";

  oneof cluster_specifier {
    option (validate.required) = true;

    // Indicates the upstream cluster to which the request should be routed
    // to.
    string cluster = 1 [(validate.rules).string = {min_len: 1}];

    // Envoy will determine the cluster to route to by reading the value of the
    // HTTP header named by cluster_header from the request headers. If the
    // header is not found or the referenced cluster does not exist, Envoy will
    // return a 404 response.
    //
    // .. attention::
    //
    //   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1
    //   *Host* header. Thus, if attempting to match on *Host*, match on *:authority* instead.
    string cluster_header = 2
        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

    // Multiple upstream clusters can be specified for a given route. The
    // request is routed to one of the upstream clusters based on weights
    // assigned to each cluster. See
    // :ref:`traffic splitting `
    // for additional documentation.
    WeightedCluster weighted_clusters = 3;
  }

  // The HTTP status code to use when configured cluster is not found.
  // The default response code is 503 Service Unavailable.
  ClusterNotFoundResponseCode cluster_not_found_response_code = 20
      [(validate.rules).enum = {defined_only: true}];

  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints
  // in the upstream cluster with metadata matching what's set in this field will be considered
  // for load balancing. If using :ref:`weighted_clusters
  // `, metadata will be merged, with values
  // provided there taking precedence. The filter name should be specified as *envoy.lb*.
  core.v4alpha.Metadata metadata_match = 4;

  // Indicates that during forwarding, the matched prefix (or path) should be
  // swapped with this value. This option allows application URLs to be rooted
  // at a different path from those exposed at the reverse proxy layer. The router filter will
  // place the original path before rewrite into the :ref:`x-envoy-original-path
  // ` header.
  //
  // Only one of *prefix_rewrite* or
  // :ref:`regex_rewrite `
  // may be specified.
  //
  // .. attention::
  //
  //   Pay careful attention to the use of trailing slashes in the
  //   :ref:`route's match ` prefix value.
  //   Stripping a prefix from a path requires multiple Routes to handle all cases. For example,
  //   rewriting */prefix* to */* and */prefix/etc* to */etc* cannot be done in a single
  //   :ref:`Route `, as shown by the below config entries:
  //
  //   .. code-block:: yaml
  //
  //     - match:
  //         prefix: "/prefix/"
  //       route:
  //         prefix_rewrite: "/"
  //     - match:
  //         prefix: "/prefix"
  //       route:
  //         prefix_rewrite: "/"
  //
  //   Having above entries in the config, requests to */prefix* will be stripped to */*, while
  //   requests to */prefix/etc* will be stripped to */etc*.
  string prefix_rewrite = 5
      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

  // Indicates that during forwarding, portions of the path that match the
  // pattern should be rewritten, even allowing the substitution of capture
  // groups from the pattern into the new path as specified by the rewrite
  // substitution string. This is useful to allow application paths to be
  // rewritten in a way that is aware of segments with variable content like
  // identifiers. The router filter will place the original path as it was
  // before the rewrite into the :ref:`x-envoy-original-path
  // ` header.
  //
  // Only one of :ref:`prefix_rewrite `
  // or *regex_rewrite* may be specified.
  //
  // Examples using Google's `RE2 `_ engine:
  //
  // * The path pattern ``^/service/([^/]+)(/.*)$`` paired with a substitution
  //   string of ``\2/instance/\1`` would transform ``/service/foo/v1/api``
  //   into ``/v1/api/instance/foo``.
  //
  // * The pattern ``one`` paired with a substitution string of ``two`` would
  //   transform ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/two/zzz``.
  //
  // * The pattern ``^(.*?)one(.*)$`` paired with a substitution string of
  //   ``\1two\2`` would replace only the first occurrence of ``one``,
  //   transforming path ``/xxx/one/yyy/one/zzz`` into ``/xxx/two/yyy/one/zzz``.
  //
  // * The pattern ``(?i)/xxx/`` paired with a substitution string of ``/yyy/``
  //   would do a case-insensitive match and transform path ``/aaa/XxX/bbb`` to
  //   ``/aaa/yyy/bbb``.
  type.matcher.v4alpha.RegexMatchAndSubstitute regex_rewrite = 32;

  oneof host_rewrite_specifier {
    // Indicates that during forwarding, the host header will be swapped with
    // this value.
    string host_rewrite_literal = 6
        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

    // Indicates that during forwarding, the host header will be swapped with
    // the hostname of the upstream host chosen by the cluster manager. This
    // option is applicable only when the destination cluster for a route is of
    // type *strict_dns* or *logical_dns*. Setting this to true with other cluster
    // types has no effect.
    google.protobuf.BoolValue auto_host_rewrite = 7;

    // Indicates that during forwarding, the host header will be swapped with the content of given
    // downstream or :ref:`custom ` header.
    // If header value is empty, host header is left intact.
    //
    // .. attention::
    //
    //   Pay attention to the potential security implications of using this option. Provided header
    //   must come from trusted source.
    string host_rewrite_header = 29
        [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];

    // Indicates that during forwarding, the host header will be swapped with
    // the result of the regex substitution executed on path value with query and fragment removed.
    // This is useful for transitioning variable content between path segment and subdomain.
    //
    // For example with the following config:
    //
    // .. code-block:: yaml
    //
    //   host_rewrite_path_regex:
    //     pattern:
    //       google_re2: {}
    //       regex: "^/(.+)/.+$"
    //     substitution: \1
    //
    // Would rewrite the host header to `envoyproxy.io` given the path `/envoyproxy.io/some/path`.
    type.matcher.v4alpha.RegexMatchAndSubstitute host_rewrite_path_regex = 35;
  }

  // Specifies the upstream timeout for the route. If not specified, the default is 15s. This
  // spans between the point at which the entire downstream request (i.e. end-of-stream) has been
  // processed and when the upstream response has been completely processed. A value of 0 will
  // disable the route's timeout.
  //
  // .. note::
  //
  //   This timeout includes all retries. See also
  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`,
  //   :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`, and the
  //   :ref:`retry overview `.
  google.protobuf.Duration timeout = 8;

  // Specifies the idle timeout for the route. If not specified, there is no per-route idle timeout,
  // although the connection manager wide :ref:`stream_idle_timeout
  // `
  // will still apply. A value of 0 will completely disable the route's idle timeout, even if a
  // connection manager stream idle timeout is configured.
  //
  // The idle timeout is distinct to :ref:`timeout
  // `, which provides an upper bound
  // on the upstream response time; :ref:`idle_timeout
  // ` instead bounds the amount
  // of time the request's stream may be idle.
  //
  // After header decoding, the idle timeout will apply on downstream and
  // upstream request events. Each time an encode/decode event for headers or
  // data is processed for the stream, the timer will be reset. If the timeout
  // fires, the stream is terminated with a 408 Request Timeout error code if no
  // upstream response header has been received, otherwise a stream reset
  // occurs.
  google.protobuf.Duration idle_timeout = 24;

  // Indicates that the route has a retry policy. Note that if this is set,
  // it'll take precedence over the virtual host level retry policy entirely
  // (e.g.: policies are not merged, most internal one becomes the enforced policy).
  RetryPolicy retry_policy = 9;

  // [#not-implemented-hide:]
  // Specifies the configuration for retry policy extension. Note that if this is set, it'll take
  // precedence over the virtual host level retry policy entirely (e.g.: policies are not merged,
  // most internal one becomes the enforced policy). :ref:`Retry policy `
  // should not be set if this field is used.
  google.protobuf.Any retry_policy_typed_config = 33;

  // Indicates that the route has request mirroring policies.
  repeated RequestMirrorPolicy request_mirror_policies = 30;

  // Optionally specifies the :ref:`routing priority `.
  core.v4alpha.RoutingPriority priority = 11 [(validate.rules).enum = {defined_only: true}];

  // Specifies a set of rate limit configurations that could be applied to the
  // route.
  repeated RateLimit rate_limits = 13;

  // Specifies a list of hash policies to use for ring hash load balancing. Each
  // hash policy is evaluated individually and the combined result is used to
  // route the request. The method of combination is deterministic such that
  // identical lists of hash policies will produce the same hash. Since a hash
  // policy examines specific parts of a request, it can fail to produce a hash
  // (i.e. if the hashed header is not present). If (and only if) all configured
  // hash policies fail to generate a hash, no hash will be produced for
  // the route. In this case, the behavior is the same as if no hash policies
  // were specified (i.e. the ring hash load balancer will choose a random
  // backend). If a hash policy has the "terminal" attribute set to true, and
  // there is already a hash generated, the hash is returned immediately,
  // ignoring the rest of the hash policy list.
  repeated HashPolicy hash_policy = 15;

  // Indicates that the route has a CORS policy.
  CorsPolicy cors = 17;

  // Allows enabling and disabling upgrades on this route; see the *UpgradeConfig*
  // message above for the per-upgrade settings each entry carries.
  repeated UpgradeConfig upgrade_configs = 25;

  // If present, Envoy will try to follow an upstream redirect response instead of proxying the
  // response back to the downstream. An upstream redirect response is defined
  // by :ref:`redirect_response_codes
  // `.
  InternalRedirectPolicy internal_redirect_policy = 34;

  // Indicates that the route has a hedge policy. Note that if this is set,
  // it'll take precedence over the virtual host level hedge policy entirely
  // (e.g.: policies are not merged, most internal one becomes the enforced policy).
  HedgePolicy hedge_policy = 27;

  // Specifies the maximum stream duration for this route.
  MaxStreamDuration max_stream_duration = 36;
}

// HTTP retry :ref:`architecture overview `.
// [#next-free-field: 12]
message RetryPolicy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RetryPolicy";

  enum ResetHeaderFormat {
    // The reset header value is an integral number of seconds to wait before retrying.
    SECONDS = 0;

    // The reset header value is a Unix timestamp (in seconds) at which to retry.
    UNIX_TIMESTAMP = 1;
  }

  message RetryPriority {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RetryPolicy.RetryPriority";

    reserved 2;

    reserved "config";

    // The name of the RetryPriority implementation to use.
    string name = 1 [(validate.rules).string = {min_len: 1}];

    // Typed configuration passed to the named implementation.
    oneof config_type {
      google.protobuf.Any typed_config = 3;
    }
  }

  message RetryHostPredicate {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RetryPolicy.RetryHostPredicate";

    reserved 2;

    reserved "config";

    // The name of the RetryHostPredicate implementation to use.
    string name = 1 [(validate.rules).string = {min_len: 1}];

    // Typed configuration passed to the named implementation.
    oneof config_type {
      google.protobuf.Any typed_config = 3;
    }
  }

  message RetryBackOff {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RetryPolicy.RetryBackOff";

    // Specifies the base interval between retries. This parameter is required and must be greater
    // than zero. Values less than 1 ms are rounded up to 1 ms.
    // See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion of Envoy's
    // back-off algorithm.
    google.protobuf.Duration base_interval = 1 [(validate.rules).duration = {
      required: true
      gt {}
    }];

    // Specifies the maximum interval between retries. This parameter is optional, but must be
    // greater than or equal to the `base_interval` if set. The default is 10 times the
    // `base_interval`. See :ref:`config_http_filters_router_x-envoy-max-retries` for a discussion
    // of Envoy's back-off algorithm.
    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];
  }

  message ResetHeader {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RetryPolicy.ResetHeader";

    // The name of the response header to parse for a back-off interval.
    string name = 1
        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

    // How the header value is interpreted (see ResetHeaderFormat).
    ResetHeaderFormat format = 2 [(validate.rules).enum = {defined_only: true}];
  }

  // A retry back-off strategy that applies when the upstream server rate limits
  // the request.
  //
  // Given this configuration:
  //
  // .. code-block:: yaml
  //
  //   rate_limited_retry_back_off:
  //     reset_headers:
  //     - name: Retry-After
  //       format: SECONDS
  //     - name: X-RateLimit-Reset
  //       format: UNIX_TIMESTAMP
  //     max_interval: "300s"
  //
  // The following algorithm will apply:
  //
  // 1. If the response contains the header ``Retry-After`` its value must be of
  //    the form ``120`` (an integer that represents the number of seconds to
  //    wait before retrying). If so, this value is used as the back-off interval.
  // 2. Otherwise, if the response contains the header ``X-RateLimit-Reset`` its
  //    value must be of the form ``1595320702`` (an integer that represents the
  //    point in time at which to retry, as a Unix timestamp in seconds). If so,
  //    the current time is subtracted from this value and the result is used as
  //    the back-off interval.
  // 3. Otherwise, Envoy will use the default
  //    :ref:`exponential back-off `
  //    strategy.
  //
  // No matter which format is used, if the resulting back-off interval exceeds
  // ``max_interval`` it is discarded and the next header in ``reset_headers``
  // is tried. If a request timeout is configured for the route it will further
  // limit how long the request will be allowed to run.
  //
  // To prevent many clients retrying at the same point in time jitter is added
  // to the back-off interval, so the resulting interval is decided by taking:
  // ``random(interval, interval * 1.5)``.
  //
  // .. attention::
  //
  //   Configuring ``rate_limited_retry_back_off`` will not by itself cause a request
  //   to be retried. You will still need to configure the right retry policy to match
  //   the responses from the upstream server.
  message RateLimitedRetryBackOff {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RetryPolicy.RateLimitedRetryBackOff";

    // Specifies the reset headers (like ``Retry-After`` or ``X-RateLimit-Reset``)
    // to match against the response. Headers are tried in order, and matched case
    // insensitive. The first header to be parsed successfully is used. If no headers
    // match the default exponential back-off is used instead.
    repeated ResetHeader reset_headers = 1 [(validate.rules).repeated = {min_items: 1}];

    // Specifies the maximum back off interval that Envoy will allow. If a reset
    // header contains an interval longer than this then it will be discarded and
    // the next header will be tried. Defaults to 300 seconds.
    google.protobuf.Duration max_interval = 2 [(validate.rules).duration = {gt {}}];
  }

  // Specifies the conditions under which retry takes place. These are the same
  // conditions documented for :ref:`config_http_filters_router_x-envoy-retry-on` and
  // :ref:`config_http_filters_router_x-envoy-retry-grpc-on`.
  string retry_on = 1;

  // Specifies the allowed number of retries. This parameter is optional and
  // defaults to 1. These are the same conditions documented for
  // :ref:`config_http_filters_router_x-envoy-max-retries`.
  google.protobuf.UInt32Value max_retries = 2;

  // Specifies a non-zero upstream timeout per retry attempt. This parameter is optional. The
  // same conditions documented for
  // :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms` apply.
  //
  // .. note::
  //
  //   If left unspecified, Envoy will use the global
  //   :ref:`route timeout ` for the request.
  //   Consequently, when using a :ref:`5xx ` based
  //   retry policy, a request that times out will not be retried as the total timeout budget
  //   would have been exhausted.
  google.protobuf.Duration per_try_timeout = 3;

  // Specifies an implementation of a RetryPriority which is used to determine the
  // distribution of load across priorities used for retries. Refer to
  // :ref:`retry plugin configuration ` for more details.
  RetryPriority retry_priority = 4;

  // Specifies a collection of RetryHostPredicates that will be consulted when selecting a host
  // for retries. If any of the predicates reject the host, host selection will be reattempted.
  // Refer to :ref:`retry plugin configuration ` for more
  // details.
  repeated RetryHostPredicate retry_host_predicate = 5;

  // The maximum number of times host selection will be reattempted before giving up, at which
  // point the host that was last selected will be routed to. If unspecified, this will default to
  // retrying once.
  int64 host_selection_retry_max_attempts = 6;

  // HTTP status codes that should trigger a retry in addition to those specified by retry_on.
  repeated uint32 retriable_status_codes = 7;

  // Specifies parameters that control exponential retry back off. This parameter is optional, in which case the
  // default base interval is 25 milliseconds or, if set, the current value of the
  // `upstream.base_retry_backoff_ms` runtime parameter. The default maximum interval is 10 times
  // the base interval. The documentation for :ref:`config_http_filters_router_x-envoy-max-retries`
  // describes Envoy's back-off algorithm.
  RetryBackOff retry_back_off = 8;

  // Specifies parameters that control a retry back-off strategy that is used
  // when the request is rate limited by the upstream server. The server may
  // return a response header like ``Retry-After`` or ``X-RateLimit-Reset`` to
  // provide feedback to the client on how long to wait before retrying. If
  // configured, this back-off strategy will be used instead of the
  // default exponential back off strategy (configured using `retry_back_off`)
  // whenever a response includes the matching headers.
  RateLimitedRetryBackOff rate_limited_retry_back_off = 11;

  // HTTP response headers that trigger a retry if present in the response. A retry will be
  // triggered if any of the header matches match the upstream response headers.
  // The field is only consulted if 'retriable-headers' retry policy is active.
  repeated HeaderMatcher retriable_headers = 9;

  // HTTP headers which must be present in the request for retries to be attempted.
  repeated HeaderMatcher retriable_request_headers = 10;
}

// HTTP request hedging :ref:`architecture overview `.
message HedgePolicy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.HedgePolicy";

  // Specifies the number of initial requests that should be sent upstream.
  // Must be at least 1.
  // Defaults to 1.
  // [#not-implemented-hide:]
  google.protobuf.UInt32Value initial_requests = 1 [(validate.rules).uint32 = {gte: 1}];

  // Specifies a probability that an additional upstream request should be sent
  // on top of what is specified by initial_requests.
  // Defaults to 0.
  // [#not-implemented-hide:]
  type.v3.FractionalPercent additional_request_chance = 2;

  // Indicates that a hedged request should be sent when the per-try timeout
  // is hit. This will only occur if the retry policy also indicates that a
  // timed out request should be retried.
  // Once a timed out request is retried due to per try timeout, the router
  // filter will ensure that it is not retried again even if the returned
  // response headers would otherwise be retried according to the specified
  // :ref:`RetryPolicy `.
  // Defaults to false.
bool hedge_on_per_try_timeout = 3;
}

// [#next-free-field: 9]
message RedirectAction {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.route.v3.RedirectAction";

  enum RedirectResponseCode {
    // Moved Permanently HTTP Status Code - 301.
    MOVED_PERMANENTLY = 0;

    // Found HTTP Status Code - 302.
    FOUND = 1;

    // See Other HTTP Status Code - 303.
    SEE_OTHER = 2;

    // Temporary Redirect HTTP Status Code - 307.
    TEMPORARY_REDIRECT = 3;

    // Permanent Redirect HTTP Status Code - 308.
    PERMANENT_REDIRECT = 4;
  }

  // When the scheme redirection take place, the following rules apply:
  //  1. If the source URI scheme is `http` and the port is explicitly
  //     set to `:80`, the port will be removed after the redirection
  //  2. If the source URI scheme is `https` and the port is explicitly
  //     set to `:443`, the port will be removed after the redirection
  oneof scheme_rewrite_specifier {
    // The scheme portion of the URL will be swapped with "https".
    bool https_redirect = 4;

    // The scheme portion of the URL will be swapped with this value.
    string scheme_redirect = 7;
  }

  // The host portion of the URL will be swapped with this value.
  string host_redirect = 1
      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

  // The port value of the URL will be swapped with this value.
  uint32 port_redirect = 8;

  oneof path_rewrite_specifier {
    // The path portion of the URL will be swapped with this value.
    // Please note that query string in path_redirect will override the
    // request's query string and will not be stripped.
    //
    // For example, let's say we have the following routes:
    //
    // - match: { path: "/old-path-1" }
    //   redirect: { path_redirect: "/new-path-1" }
    // - match: { path: "/old-path-2" }
    //   redirect: { path_redirect: "/new-path-2", strip_query: "true" }
    // - match: { path: "/old-path-3" }
    //   redirect: { path_redirect: "/new-path-3?foo=1", strip_query: "true" }
    //
    // 1. if request uri is "/old-path-1?bar=1", users will be redirected to "/new-path-1?bar=1"
    // 2. if request uri is "/old-path-2?bar=1", users will be redirected to "/new-path-2"
    // 3. if request uri is "/old-path-3?bar=1", users will be redirected to "/new-path-3?foo=1"
    string path_redirect = 2
        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];

    // Indicates that during redirection, the matched prefix (or path)
    // should be swapped with this value. This option allows redirect URLs be dynamically created
    // based on the request.
    //
    // .. attention::
    //
    //   Pay attention to the use of trailing slashes as mentioned in
    //   :ref:`RouteAction's prefix_rewrite `.
    string prefix_rewrite = 5
        [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
  }

  // The HTTP status code to use in the redirect response. The default response
  // code is MOVED_PERMANENTLY (301).
  RedirectResponseCode response_code = 3 [(validate.rules).enum = {defined_only: true}];

  // Indicates that during redirection, the query portion of the URL will
  // be removed. Default value is false.
  bool strip_query = 6;
}

message DirectResponseAction {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.route.v3.DirectResponseAction";

  // Specifies the HTTP response status to be returned.
  uint32 status = 1 [(validate.rules).uint32 = {lt: 600 gte: 100}];

  // Specifies the content of the response body. If this setting is omitted,
  // no body is included in the generated response.
  //
  // .. note::
  //
  //   Headers can be specified using *response_headers_to_add* in the enclosing
  //   :ref:`envoy_api_msg_config.route.v4alpha.Route`, :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` or
  //   :ref:`envoy_api_msg_config.route.v4alpha.VirtualHost`.
core.v4alpha.DataSource body = 2;
}

message Decorator {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Decorator";

  // The operation name associated with the request matched to this route. If tracing is
  // enabled, this information will be used as the span name reported for this request.
  //
  // .. note::
  //
  //   For ingress (inbound) requests, or egress (outbound) responses, this value may be overridden
  //   by the :ref:`x-envoy-decorator-operation
  //   ` header.
  string operation = 1 [(validate.rules).string = {min_len: 1}];

  // Whether the decorated details should be propagated to the other party. The default is true.
  google.protobuf.BoolValue propagate = 2;
}

message Tracing {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.Tracing";

  // Target percentage of requests managed by this HTTP connection manager that will be force
  // traced if the :ref:`x-client-trace-id `
  // header is set. This field is a direct analog for the runtime variable
  // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager
  // `.
  // Default: 100%
  type.v3.FractionalPercent client_sampling = 1;

  // Target percentage of requests managed by this HTTP connection manager that will be randomly
  // selected for trace generation, if not requested by the client or not forced. This field is
  // a direct analog for the runtime variable 'tracing.random_sampling' in the
  // :ref:`HTTP Connection Manager `.
  // Default: 100%
  type.v3.FractionalPercent random_sampling = 2;

  // Target percentage of requests managed by this HTTP connection manager that will be traced
  // after all other sampling checks have been applied (client-directed, force tracing, random
  // sampling). This field functions as an upper limit on the total configured sampling rate. For
  // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1%
  // of client requests with the appropriate headers to be force traced. This field is a direct
  // analog for the runtime variable 'tracing.global_enabled' in the
  // :ref:`HTTP Connection Manager `.
  // Default: 100%
  type.v3.FractionalPercent overall_sampling = 3;

  // A list of custom tags with unique tag name to create tags for the active span.
  // It will take effect after merging with the :ref:`corresponding configuration
  // `
  // configured in the HTTP connection manager. If two tags with the same name are configured
  // each in the HTTP connection manager and the route level, the one configured here takes
  // priority.
  repeated type.tracing.v3.CustomTag custom_tags = 4;
}

// A virtual cluster is a way of specifying a regex matching rule against
// certain important endpoints such that statistics are generated explicitly for
// the matched requests. The reason this is useful is that when doing
// prefix/path matching Envoy does not always know what the application
// considers to be an endpoint. Thus, it’s impossible for Envoy to generically
// emit per endpoint statistics. However, often systems have highly critical
// endpoints that they wish to get “perfect” statistics on. Virtual cluster
// statistics are perfect in the sense that they are emitted on the downstream
// side such that they include network level failures.
//
// Documentation for :ref:`virtual cluster statistics `.
//
// .. note::
//
//   Virtual clusters are a useful tool, but we do not recommend setting up a virtual cluster for
//   every application endpoint. This is both not easily maintainable and as well the matching and
//   statistics output are not free.
message VirtualCluster {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.route.v3.VirtualCluster";

  reserved 1, 3;

  reserved "pattern", "method";

  // Specifies a list of header matchers to use for matching requests. Each specified header must
  // match. The pseudo-headers `:path` and `:method` can be used to match the request path and
  // method, respectively.
repeated HeaderMatcher headers = 4;

  // Specifies the name of the virtual cluster. The virtual cluster name as well
  // as the virtual host name are used when emitting statistics. The statistics are emitted by the
  // router filter and are documented :ref:`here `.
  string name = 2 [(validate.rules).string = {min_len: 1}];
}

// Global rate limiting :ref:`architecture overview `.
message RateLimit {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.RateLimit";

  // [#next-free-field: 8]
  message Action {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RateLimit.Action";

    // The following descriptor entry is appended to the descriptor:
    //
    // .. code-block:: cpp
    //
    //   ("source_cluster", "")
    //
    // is derived from the :option:`--service-cluster` option.
    message SourceCluster {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RateLimit.Action.SourceCluster";
    }

    // The following descriptor entry is appended to the descriptor:
    //
    // .. code-block:: cpp
    //
    //   ("destination_cluster", "")
    //
    // Once a request matches against a route table rule, a routed cluster is determined by one of
    // the following :ref:`route table configuration `
    // settings:
    //
    // * :ref:`cluster ` indicates the upstream cluster
    //   to route to.
    // * :ref:`weighted_clusters `
    //   chooses a cluster randomly from a set of clusters with attributed weight.
    // * :ref:`cluster_header ` indicates which
    //   header in the request contains the target cluster.
    message DestinationCluster {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RateLimit.Action.DestinationCluster";
    }

    // The following descriptor entry is appended when a header contains a key that matches the
    // *header_name*:
    //
    // .. code-block:: cpp
    //
    //   ("", "")
    message RequestHeaders {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RateLimit.Action.RequestHeaders";

      // The header name to be queried from the request headers. The header’s
      // value is used to populate the value of the descriptor entry for the
      // descriptor_key.
      string header_name = 1
          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

      // The key to use in the descriptor entry.
      string descriptor_key = 2 [(validate.rules).string = {min_len: 1}];

      // If set to true, Envoy skips the descriptor while calling rate limiting service
      // when header is not present in the request. By default it skips calling the
      // rate limiting service if this header is not present in the request.
      bool skip_if_absent = 3;
    }

    // The following descriptor entry is appended to the descriptor and is populated using the
    // trusted address from :ref:`x-forwarded-for `:
    //
    // .. code-block:: cpp
    //
    //   ("remote_address", "")
    message RemoteAddress {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RateLimit.Action.RemoteAddress";
    }

    // The following descriptor entry is appended to the descriptor:
    //
    // .. code-block:: cpp
    //
    //   ("generic_key", "")
    message GenericKey {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RateLimit.Action.GenericKey";

      // The value to use in the descriptor entry.
      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];

      // An optional key to use in the descriptor entry. If not set it defaults
      // to 'generic_key' as the descriptor key.
      string descriptor_key = 2;
    }

    // The following descriptor entry is appended to the descriptor:
    //
    // .. code-block:: cpp
    //
    //   ("header_match", "")
    message HeaderValueMatch {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RateLimit.Action.HeaderValueMatch";

      // The value to use in the descriptor entry.
      string descriptor_value = 1 [(validate.rules).string = {min_len: 1}];

      // If set to true, the action will append a descriptor entry when the
      // request matches the headers. If set to false, the action will append a
      // descriptor entry when the request does not match the headers. The
      // default value is true.
      google.protobuf.BoolValue expect_match = 2;

      // Specifies a set of headers that the rate limit action should match
      // on. The action will check the request’s headers against all the
      // specified headers in the config. A match will happen if all the
      // headers in the config are present in the request with the same values
      // (or based on presence if the value field is not in the config).
      repeated HeaderMatcher headers = 3 [(validate.rules).repeated = {min_items: 1}];
    }

    // The following descriptor entry is appended when the dynamic metadata contains a key value:
    //
    // .. code-block:: cpp
    //
    //   ("", "")
    message DynamicMetaData {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RateLimit.Action.DynamicMetaData";

      // The key to use in the descriptor entry.
      string descriptor_key = 1 [(validate.rules).string = {min_len: 1}];

      // Metadata struct that defines the key and path to retrieve the string value. A match will
      // only happen if the value in the dynamic metadata is of type string.
      type.metadata.v3.MetadataKey metadata_key = 2 [(validate.rules).message = {required: true}];

      // An optional value to use if *metadata_key* is empty. If not set and
      // no value is present under the metadata_key then no descriptor is generated.
      string default_value = 3;
    }

    oneof action_specifier {
      option (validate.required) = true;

      // Rate limit on source cluster.
      SourceCluster source_cluster = 1;

      // Rate limit on destination cluster.
      DestinationCluster destination_cluster = 2;

      // Rate limit on request headers.
      RequestHeaders request_headers = 3;

      // Rate limit on remote address.
      RemoteAddress remote_address = 4;

      // Rate limit on a generic key.
      GenericKey generic_key = 5;

      // Rate limit on the existence of request headers.
      HeaderValueMatch header_value_match = 6;

      // Rate limit on dynamic metadata.
      DynamicMetaData dynamic_metadata = 7;
    }
  }

  message Override {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.route.v3.RateLimit.Override";

    // Fetches the override from the dynamic metadata.
    message DynamicMetadata {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.route.v3.RateLimit.Override.DynamicMetadata";

      // Metadata struct that defines the key and path to retrieve the struct value.
      // The value must be a struct containing an integer "requests_per_unit" property
      // and a "unit" property with a value parseable to :ref:`RateLimitUnit
      // enum `
      type.metadata.v3.MetadataKey metadata_key = 1 [(validate.rules).message = {required: true}];
    }

    oneof override_specifier {
      option (validate.required) = true;

      // Limit override from dynamic metadata.
      DynamicMetadata dynamic_metadata = 1;
    }
  }

  // Refers to the stage set in the filter. The rate limit configuration only
  // applies to filters with the same stage number. The default stage number is
  // 0.
  //
  // .. note::
  //
  //   The filter supports a range of 0 - 10 inclusively for stage numbers.
  google.protobuf.UInt32Value stage = 1 [(validate.rules).uint32 = {lte: 10}];

  // The key to be set in runtime to disable this rate limit configuration.
  string disable_key = 2;

  // A list of actions that are to be applied for this rate limit configuration.
  // Order matters as the actions are processed sequentially and the descriptor
  // is composed by appending descriptor entries in that sequence. If an action
  // cannot append a descriptor entry, no descriptor is generated for the
  // configuration. See :ref:`composing actions
  // ` for additional documentation.
  repeated Action actions = 3 [(validate.rules).repeated = {min_items: 1}];

  // An optional limit override to be appended to the descriptor produced by this
  // rate limit configuration.
// If the override value is invalid or cannot be resolved
  // from metadata, no override is provided. See :ref:`rate limit override
  // ` for more information.
  Override limit = 4;
}

// .. attention::
//
//   Internally, Envoy always uses the HTTP/2 *:authority* header to represent the HTTP/1 *Host*
//   header. Thus, if attempting to match on *Host*, match on *:authority* instead.
//
// .. attention::
//
//   To route on HTTP method, use the special HTTP/2 *:method* header. This works for both
//   HTTP/1 and HTTP/2 as Envoy normalizes headers. E.g.,
//
//   .. code-block:: json
//
//     {
//       "name": ":method",
//       "exact_match": "POST"
//     }
//
// .. attention::
//   In the absence of any header match specifier, match will default to :ref:`present_match
//   `. i.e, a request that has the :ref:`name
//   ` header will match, regardless of the header's
//   value.
//
// [#next-major-version: HeaderMatcher should be refactored to use StringMatcher.]
// [#next-free-field: 13]
message HeaderMatcher {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.route.v3.HeaderMatcher";

  reserved 2, 3, 5;

  reserved "regex_match";

  // Specifies the name of the header in the request.
  string name = 1
      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

  // Specifies how the header match will be performed to route the request.
  oneof header_match_specifier {
    // If specified, header match will be performed based on the value of the header.
    string exact_match = 4;

    // If specified, this regex string is a regular expression rule which implies the entire request
    // header value must match the regex. The rule will not match if only a subsequence of the
    // request header value matches the regex.
    type.matcher.v4alpha.RegexMatcher safe_regex_match = 11;

    // If specified, header match will be performed based on range.
    // The rule will match if the request header value is within this range.
    // The entire request header value must represent an integer in base 10 notation: consisting of
    // an optional plus or minus sign followed by a sequence of digits. The rule will not match if
    // the header value does not represent an integer. Match will fail for empty values, floating
    // point numbers or if only a subsequence of the header value is an integer.
    //
    // Examples:
    //
    // * For range [-10,0), route will match for header value -1, but not for 0, "somestring", 10.9,
    //   "-1somestring"
    type.v3.Int64Range range_match = 6;

    // If specified, header match will be performed based on whether the header is in the
    // request.
    bool present_match = 7;

    // If specified, header match will be performed based on the prefix of the header value.
    // Note: empty prefix is not allowed, please use present_match instead.
    //
    // Examples:
    //
    // * The prefix *abcd* matches the value *abcdxyz*, but not for *abcxyz*.
    string prefix_match = 9 [(validate.rules).string = {min_len: 1}];

    // If specified, header match will be performed based on the suffix of the header value.
    // Note: empty suffix is not allowed, please use present_match instead.
    //
    // Examples:
    //
    // * The suffix *abcd* matches the value *xyzabcd*, but not for *xyzbcd*.
    string suffix_match = 10 [(validate.rules).string = {min_len: 1}];

    // If specified, header match will be performed based on whether the header value contains
    // the given value or not.
    // Note: empty contains match is not allowed, please use present_match instead.
    //
    // Examples:
    //
    // * The value *abcd* matches the value *xyzabcdpqr*, but not for *xyzbcdpqr*.
    string contains_match = 12 [(validate.rules).string = {min_len: 1}];
  }

  // If specified, the match result will be inverted before checking. Defaults to false.
  //
  // Examples:
  //
  // * The regex ``\d{3}`` does not match the value *1234*, so it will match when inverted.
  // * The range [-10,0) will match the value -1, so it will not match when inverted.
  bool invert_match = 8;
}

// Query parameter matching treats the query string of a request's :path header
// as an ampersand-separated list of keys and/or key=value elements.
// [#next-free-field: 7]
message QueryParameterMatcher {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.route.v3.QueryParameterMatcher";

  reserved 3, 4;

  reserved "value", "regex";

  // Specifies the name of a key that must be present in the requested
  // *path*'s query string.
  string name = 1 [(validate.rules).string = {min_len: 1 max_bytes: 1024}];

  oneof query_parameter_match_specifier {
    // Specifies whether a query parameter value should match against a string.
    type.matcher.v4alpha.StringMatcher string_match = 5 [(validate.rules).message = {required: true}];

    // Specifies whether a query parameter should be present.
    bool present_match = 6;
  }
}

// HTTP Internal Redirect :ref:`architecture overview `.
message InternalRedirectPolicy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.route.v3.InternalRedirectPolicy";

  // An internal redirect is not handled, unless the number of previous internal redirects that a
  // downstream request has encountered is lower than this value.
  // In the case where a downstream request is bounced among multiple routes by internal redirect,
  // the first route that hits this threshold, or does not set :ref:`internal_redirect_policy
  // `
  // will pass the redirect back to downstream.
  //
  // If not specified, at most one redirect will be followed.
  google.protobuf.UInt32Value max_internal_redirects = 1;

  // Defines what upstream response codes are allowed to trigger internal redirect. If unspecified,
  // only 302 will be treated as internal redirect.
  // Only 301, 302, 303, 307 and 308 are valid values. Any other codes will be ignored.
repeated uint32 redirect_response_codes = 2 [(validate.rules).repeated = {max_items: 5}];

  // Specifies a list of predicates that are queried when an upstream response is deemed
  // to trigger an internal redirect by all other criteria. Any predicate in the list can reject
  // the redirect, causing the response to be proxied to downstream.
  repeated core.v4alpha.TypedExtensionConfig predicates = 3;

  // Allow internal redirect to follow a target URI with a different scheme than the value of
  // x-forwarded-proto. The default is false.
  bool allow_cross_scheme_redirect = 4;
}

================================================
FILE: api/envoy/config/route/v4alpha/scoped_route.proto
================================================
syntax = "proto3";

package envoy.config.route.v4alpha;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.config.route.v4alpha";
option java_outer_classname = "ScopedRouteProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: HTTP scoped routing configuration]
// * Routing :ref:`architecture overview `

// Specifies a routing scope, which associates a
// :ref:`Key` to a
// :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` (identified by its resource name).
//
// The HTTP connection manager builds up a table consisting of these Key to
// RouteConfiguration mappings, and looks up the RouteConfiguration to use per
// request according to the algorithm specified in the
// :ref:`scope_key_builder`
// assigned to the HttpConnectionManager.
//
// For example, with the following configurations (in YAML):
//
// HttpConnectionManager config:
//
// .. code::
//
//   ...
// scoped_routes: // name: foo-scoped-routes // scope_key_builder: // fragments: // - header_value_extractor: // name: X-Route-Selector // element_separator: , // element: // separator: = // key: vip // // ScopedRouteConfiguration resources (specified statically via // :ref:`scoped_route_configurations_list` // or obtained dynamically via SRDS): // // .. code:: // // (1) // name: route-scope1 // route_configuration_name: route-config1 // key: // fragments: // - string_key: 172.10.10.20 // // (2) // name: route-scope2 // route_configuration_name: route-config2 // key: // fragments: // - string_key: 172.20.20.30 // // A request from a client such as: // // .. code:: // // GET / HTTP/1.1 // Host: foo.com // X-Route-Selector: vip=172.10.10.20 // // would result in the routing table defined by the `route-config1` // RouteConfiguration being assigned to the HTTP request/stream. // message ScopedRouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.ScopedRouteConfiguration"; // Specifies a key which is matched against the output of the // :ref:`scope_key_builder` // specified in the HttpConnectionManager. The matching is done per HTTP // request and is dependent on the order of the fragments contained in the // Key. message Key { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.ScopedRouteConfiguration.Key"; message Fragment { option (udpa.annotations.versioning).previous_message_type = "envoy.config.route.v3.ScopedRouteConfiguration.Key.Fragment"; oneof type { option (validate.required) = true; // A string to match against. string string_key = 1; } } // The ordered set of fragments to match against. The order must match the // fragments in the corresponding // :ref:`scope_key_builder`. repeated Fragment fragments = 1 [(validate.rules).repeated = {min_items: 1}]; } // Whether the RouteConfiguration should be loaded on demand. bool on_demand = 4; // The name assigned to the routing scope. 
string name = 1 [(validate.rules).string = {min_len: 1}]; // The resource name to use for a :ref:`envoy_api_msg_service.discovery.v4alpha.DiscoveryRequest` to an // RDS server to fetch the :ref:`envoy_api_msg_config.route.v4alpha.RouteConfiguration` associated // with this scope. string route_configuration_name = 2 [(validate.rules).string = {min_len: 1}]; // The key to match against. Key key = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/tap/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/route/v3:pkg", "//envoy/service/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/tap/v3/common.proto ================================================ syntax = "proto3"; package envoy.config.tap.v3; import "envoy/config/common/matcher/v3/matcher.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.tap.v3"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common tap configuration] // Tap configuration. message TapConfig { // [#comment:TODO(mattklein123): Rate limiting] option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.TapConfig"; // The match configuration. 
If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. // Exactly one of :ref:`match ` and // :ref:`match_config ` must be set. If both // are set, the :ref:`match ` will be used. MatchPredicate match_config = 1 [deprecated = true]; // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. // Exactly one of :ref:`match ` and // :ref:`match_config ` must be set. If both // are set, the :ref:`match ` will be used. common.matcher.v3.MatchPredicate match = 4; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for // which the tap matching is enabled. When not enabled, the request\connection will not be // recorded. // // .. note:: // // This field defaults to 100/:ref:`HUNDRED // `. core.v3.RuntimeFractionalPercent tap_enabled = 3; } // Tap match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. // [#next-free-field: 11] message MatchPredicate { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.MatchPredicate"; // A set of match configurations used for logical operations. message MatchSet { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.MatchPredicate.MatchSet"; // The list of rules that make up the set. repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; } oneof rule { option (validate.required) = true; // A set that describes a logical OR. If any member of the set matches, the match configuration // matches. 
MatchSet or_match = 1; // A set that describes a logical AND. If all members of the set match, the match configuration // matches. MatchSet and_match = 2; // A negation match. The match configuration will match if the negated match condition matches. MatchPredicate not_match = 3; // The match configuration will always match. bool any_match = 4 [(validate.rules).bool = {const: true}]; // HTTP request headers match configuration. HttpHeadersMatch http_request_headers_match = 5; // HTTP request trailers match configuration. HttpHeadersMatch http_request_trailers_match = 6; // HTTP response headers match configuration. HttpHeadersMatch http_response_headers_match = 7; // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; // HTTP request generic body match configuration. HttpGenericBodyMatch http_request_generic_body_match = 9; // HTTP response generic body match configuration. HttpGenericBodyMatch http_response_generic_body_match = 10; } } // HTTP headers match configuration. message HttpHeadersMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.HttpHeadersMatch"; // HTTP headers to match. repeated route.v3.HeaderMatcher headers = 1; } // HTTP generic body match configuration. // List of text strings and hex strings to be located in HTTP body. // All specified strings must be found in the HTTP body for positive match. // The search may be limited to specified number of bytes from the body start. // // .. attention:: // // Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. // If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified // to scan only part of the http body. 
message HttpGenericBodyMatch {
  message GenericTextMatch {
    oneof rule {
      option (validate.required) = true;

      // Text string to be located in HTTP body.
      string string_match = 1 [(validate.rules).string = {min_len: 1}];

      // Sequence of bytes to be located in HTTP body.
      bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}];
    }
  }

  // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer).
  uint32 bytes_limit = 1;

  // List of patterns to match.
  repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}];
}

// Tap output configuration.
message OutputConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.tap.v2alpha.OutputConfig";

  // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple
  // sink types are supported this constraint will be relaxed.
  repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}];

  // For buffered tapping, the maximum amount of received body that will be buffered prior to
  // truncation. If truncation occurs, the :ref:`truncated
  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. If not specified, the
  // default is 1KiB.
  google.protobuf.UInt32Value max_buffered_rx_bytes = 2;

  // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to
  // truncation. If truncation occurs, the :ref:`truncated
  // <envoy_api_field_data.tap.v3.Body.truncated>` field will be set. If not specified, the
  // default is 1KiB.
  google.protobuf.UInt32Value max_buffered_tx_bytes = 3;

  // Indicates whether taps produce a single buffered message per tap, or multiple streamed
  // messages per tap in the emitted :ref:`TraceWrapper
  // <envoy_api_msg_data.tap.v3.TraceWrapper>` messages. Note that streamed tapping does not
  // mean that no buffering takes place. Buffering may be required if data is processed before a
  // match can be determined. See the HTTP tap filter :ref:`streaming
  // <config_http_filters_tap_streaming>` documentation for more information.
  bool streaming = 4;
}

// Tap output sink configuration.
message OutputSink { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.OutputSink"; // Output format. All output is in the form of one or more :ref:`TraceWrapper // ` messages. This enumeration indicates // how those messages are written. Note that not all sinks support all output formats. See // individual sink documentation for more information. enum Format { // Each message will be written as JSON. Any :ref:`body ` // data will be present in the :ref:`as_bytes // ` field. This means that body data will be // base64 encoded as per the `proto3 JSON mappings // `_. JSON_BODY_AS_BYTES = 0; // Each message will be written as JSON. Any :ref:`body ` // data will be present in the :ref:`as_string // ` field. This means that body data will be // string encoded as per the `proto3 JSON mappings // `_. This format type is // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the // user wishes to view it directly without being forced to base64 decode the body. JSON_BODY_AS_STRING = 1; // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes // multiple binary messages without any length information the data stream will not be // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) // this output format makes consumption simpler. PROTO_BINARY = 2; // Messages are written as a sequence tuples, where each tuple is the message length encoded // as a `protobuf 32-bit varint // `_ // followed by the binary message. The messages can be read back using the language specific // protobuf coded stream implementation to obtain the message length and the message. PROTO_BINARY_LENGTH_DELIMITED = 3; // Text proto format. PROTO_TEXT = 4; } // Sink output format. 
Format format = 1 [(validate.rules).enum = {defined_only: true}]; oneof output_sink_type { option (validate.required) = true; // Tap output will be streamed out the :http:post:`/tap` admin endpoint. // // .. attention:: // // It is only allowed to specify the streaming admin output sink if the tap is being // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has // been configured to receive tap configuration from some other source (e.g., static // file, XDS, etc.) configuring the streaming admin output type will fail. StreamingAdminSink streaming_admin = 2; // Tap output will be written to a file per tap sink. FilePerTapSink file_per_tap = 3; // [#not-implemented-hide:] // GrpcService to stream data to. The format argument must be PROTO_BINARY. // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] StreamingGrpcSink streaming_grpc = 4; } } // Streaming admin sink configuration. message StreamingAdminSink { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.StreamingAdminSink"; } // The file per tap sink outputs a discrete file for every tapped stream. message FilePerTapSink { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.FilePerTapSink"; // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC // server. message StreamingGrpcSink { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.StreamingGrpcSink"; // Opaque identifier, that will be sent back to the streaming grpc server. string tap_id = 1; // The gRPC server that hosts the Tap Sink Service. 
core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/tap/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/common/matcher/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/config/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/tap/v4alpha/common.proto ================================================ syntax = "proto3"; package envoy.config.tap.v4alpha; import "envoy/config/common/matcher/v4alpha/matcher.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.tap.v4alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Common tap configuration] // Tap configuration. message TapConfig { // [#comment:TODO(mattklein123): Rate limiting] option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.TapConfig"; reserved 1; reserved "match_config"; // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. // Exactly one of :ref:`match ` and // :ref:`match_config ` must be set. If both // are set, the :ref:`match ` will be used. 
common.matcher.v4alpha.MatchPredicate match = 4; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for // which the tap matching is enabled. When not enabled, the request\connection will not be // recorded. // // .. note:: // // This field defaults to 100/:ref:`HUNDRED // `. core.v4alpha.RuntimeFractionalPercent tap_enabled = 3; } // Tap match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. // [#next-free-field: 11] message MatchPredicate { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate"; // A set of match configurations used for logical operations. message MatchSet { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.MatchPredicate.MatchSet"; // The list of rules that make up the set. repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; } oneof rule { option (validate.required) = true; // A set that describes a logical OR. If any member of the set matches, the match configuration // matches. MatchSet or_match = 1; // A set that describes a logical AND. If all members of the set match, the match configuration // matches. MatchSet and_match = 2; // A negation match. The match configuration will match if the negated match condition matches. MatchPredicate not_match = 3; // The match configuration will always match. bool any_match = 4 [(validate.rules).bool = {const: true}]; // HTTP request headers match configuration. HttpHeadersMatch http_request_headers_match = 5; // HTTP request trailers match configuration. 
HttpHeadersMatch http_request_trailers_match = 6; // HTTP response headers match configuration. HttpHeadersMatch http_response_headers_match = 7; // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; // HTTP request generic body match configuration. HttpGenericBodyMatch http_request_generic_body_match = 9; // HTTP response generic body match configuration. HttpGenericBodyMatch http_response_generic_body_match = 10; } } // HTTP headers match configuration. message HttpHeadersMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.HttpHeadersMatch"; // HTTP headers to match. repeated route.v4alpha.HeaderMatcher headers = 1; } // HTTP generic body match configuration. // List of text strings and hex strings to be located in HTTP body. // All specified strings must be found in the HTTP body for positive match. // The search may be limited to specified number of bytes from the body start. // // .. attention:: // // Searching for patterns in HTTP body is potentially cpu intensive. For each specified pattern, http body is scanned byte by byte to find a match. // If multiple patterns are specified, the process is repeated for each pattern. If location of a pattern is known, ``bytes_limit`` should be specified // to scan only part of the http body. message HttpGenericBodyMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.HttpGenericBodyMatch"; message GenericTextMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.HttpGenericBodyMatch.GenericTextMatch"; oneof rule { option (validate.required) = true; // Text string to be located in HTTP body. string string_match = 1 [(validate.rules).string = {min_len: 1}]; // Sequence of bytes to be located in HTTP body. 
bytes binary_match = 2 [(validate.rules).bytes = {min_len: 1}]; } } // Limits search to specified number of bytes - default zero (no limit - match entire captured buffer). uint32 bytes_limit = 1; // List of patterns to match. repeated GenericTextMatch patterns = 2 [(validate.rules).repeated = {min_items: 1}]; } // Tap output configuration. message OutputConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputConfig"; // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple // sink types are supported this constraint will be relaxed. repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; // For buffered tapping, the maximum amount of received body that will be buffered prior to // truncation. If truncation occurs, the :ref:`truncated // ` field will be set. If not specified, the // default is 1KiB. google.protobuf.UInt32Value max_buffered_rx_bytes = 2; // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to // truncation. If truncation occurs, the :ref:`truncated // ` field will be set. If not specified, the // default is 1KiB. google.protobuf.UInt32Value max_buffered_tx_bytes = 3; // Indicates whether taps produce a single buffered message per tap, or multiple streamed // messages per tap in the emitted :ref:`TraceWrapper // ` messages. Note that streamed tapping does not // mean that no buffering takes place. Buffering may be required if data is processed before a // match can be determined. See the HTTP tap filter :ref:`streaming // ` documentation for more information. bool streaming = 4; } // Tap output sink configuration. message OutputSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.OutputSink"; // Output format. All output is in the form of one or more :ref:`TraceWrapper // ` messages. This enumeration indicates // how those messages are written. 
Note that not all sinks support all output formats. See // individual sink documentation for more information. enum Format { // Each message will be written as JSON. Any :ref:`body ` // data will be present in the :ref:`as_bytes // ` field. This means that body data will be // base64 encoded as per the `proto3 JSON mappings // `_. JSON_BODY_AS_BYTES = 0; // Each message will be written as JSON. Any :ref:`body ` // data will be present in the :ref:`as_string // ` field. This means that body data will be // string encoded as per the `proto3 JSON mappings // `_. This format type is // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the // user wishes to view it directly without being forced to base64 decode the body. JSON_BODY_AS_STRING = 1; // Binary proto format. Note that binary proto is not self-delimiting. If a sink writes // multiple binary messages without any length information the data stream will not be // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) // this output format makes consumption simpler. PROTO_BINARY = 2; // Messages are written as a sequence tuples, where each tuple is the message length encoded // as a `protobuf 32-bit varint // `_ // followed by the binary message. The messages can be read back using the language specific // protobuf coded stream implementation to obtain the message length and the message. PROTO_BINARY_LENGTH_DELIMITED = 3; // Text proto format. PROTO_TEXT = 4; } // Sink output format. Format format = 1 [(validate.rules).enum = {defined_only: true}]; oneof output_sink_type { option (validate.required) = true; // Tap output will be streamed out the :http:post:`/tap` admin endpoint. // // .. attention:: // // It is only allowed to specify the streaming admin output sink if the tap is being // configured from the :http:post:`/tap` admin endpoint. 
Thus, if an extension has // been configured to receive tap configuration from some other source (e.g., static // file, XDS, etc.) configuring the streaming admin output type will fail. StreamingAdminSink streaming_admin = 2; // Tap output will be written to a file per tap sink. FilePerTapSink file_per_tap = 3; // [#not-implemented-hide:] // GrpcService to stream data to. The format argument must be PROTO_BINARY. // [#comment: TODO(samflattery): remove cleanup in uber_per_filter.cc once implemented] StreamingGrpcSink streaming_grpc = 4; } } // Streaming admin sink configuration. message StreamingAdminSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.StreamingAdminSink"; } // The file per tap sink outputs a discrete file for every tapped stream. message FilePerTapSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.FilePerTapSink"; // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). string path_prefix = 1 [(validate.rules).string = {min_len: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC // server. message StreamingGrpcSink { option (udpa.annotations.versioning).previous_message_type = "envoy.config.tap.v3.StreamingGrpcSink"; // Opaque identifier, that will be sent back to the streaming grpc server. string tap_id = 1; // The gRPC server that hosts the Tap Sink Service. core.v4alpha.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/trace/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", ], ) ================================================ FILE: api/envoy/config/trace/v2/datadog.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "DatadogProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Datadog tracer] // Configuration for the Datadog tracer. // [#extension: envoy.tracers.datadog] message DatadogConfig { // The cluster to use for submitting traces to the Datadog agent. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // The name used for the service when traces are generated by envoy. string service_name = 2 [(validate.rules).string = {min_bytes: 1}]; } ================================================ FILE: api/envoy/config/trace/v2/dynamic_ot.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "DynamicOtProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Dynamically loadable OpenTracing tracer] // DynamicOtConfig is used to dynamically load a tracer from a shared library // that implements the `OpenTracing dynamic loading API // `_. 
// [#extension: envoy.tracers.dynamic_ot] message DynamicOtConfig { // Dynamic library implementing the `OpenTracing API // `_. string library = 1 [(validate.rules).string = {min_bytes: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. google.protobuf.Struct config = 2; } ================================================ FILE: api/envoy/config/trace/v2/http_tracer.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "HttpTracerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tracing] // Tracing :ref:`architecture overview `. // The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. // // Envoy may support other tracers in the future, but right now the HTTP tracer is the only one // supported. // // .. attention:: // // Use of this message type has been deprecated in favor of direct use of // :ref:`Tracing.Http `. message Tracing { // Configuration for an HTTP tracer provider used by Envoy. // // The configuration is defined by the // :ref:`HttpConnectionManager.Tracing ` // :ref:`provider ` // field. message Http { // The name of the HTTP trace driver to instantiate. The name must match a // supported HTTP trace driver. Built-in trace drivers: // // - *envoy.tracers.lightstep* // - *envoy.tracers.zipkin* // - *envoy.tracers.dynamic_ot* // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* // - *envoy.tracers.xray* string name = 1 [(validate.rules).string = {min_bytes: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. 
// See the trace drivers for examples: // // - :ref:`LightstepConfig ` // - :ref:`ZipkinConfig ` // - :ref:`DynamicOtConfig ` // - :ref:`DatadogConfig ` // - :ref:`OpenCensusConfig ` // - :ref:`AWS X-Ray ` oneof config_type { google.protobuf.Struct config = 2 [deprecated = true]; google.protobuf.Any typed_config = 3; } } // Provides configuration for the HTTP tracer. Http http = 1; } ================================================ FILE: api/envoy/config/trace/v2/lightstep.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "LightstepProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: LightStep tracer] // Configuration for the LightStep tracer. // [#extension: envoy.tracers.lightstep] message LightstepConfig { // Available propagation modes enum PropagationMode { // Propagate trace context in the single header x-ot-span-context. ENVOY = 0; // Propagate trace context using LightStep's native format. LIGHTSTEP = 1; // Propagate trace context using the b3 format. B3 = 2; // Propagate trace context using the w3c trace-context standard. TRACE_CONTEXT = 3; } // The cluster manager cluster that hosts the LightStep collectors. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // File containing the access token to the `LightStep // `_ API. string access_token_file = 2 [(validate.rules).string = {min_bytes: 1}]; // Propagation modes to use by LightStep's tracer. 
repeated PropagationMode propagation_modes = 3 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; } ================================================ FILE: api/envoy/config/trace/v2/opencensus.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2; import "envoy/api/v2/core/grpc_service.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "OpencensusProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: OpenCensus tracer] // Configuration for the OpenCensus tracer. // [#next-free-field: 15] // [#extension: envoy.tracers.opencensus] message OpenCensusConfig { enum TraceContext { // No-op default, no trace context is utilized. NONE = 0; // W3C Trace-Context format "traceparent:" header. TRACE_CONTEXT = 1; // Binary "grpc-trace-bin:" header. GRPC_TRACE_BIN = 2; // "X-Cloud-Trace-Context:" header. CLOUD_TRACE_CONTEXT = 3; // X-B3-* headers. B3 = 4; } reserved 7; // Configures tracing, e.g. the sampler, max number of annotations, etc. opencensus.proto.trace.v1.TraceConfig trace_config = 1; // Enables the stdout exporter if set to true. This is intended for debugging // purposes. bool stdout_exporter_enabled = 2; // Enables the Stackdriver exporter if set to true. The project_id must also // be set. bool stackdriver_exporter_enabled = 3; // The Cloud project_id to use for Stackdriver tracing. string stackdriver_project_id = 4; // (optional) By default, the Stackdriver exporter will connect to production // Stackdriver. 
If stackdriver_address is non-empty, it will instead connect // to this address, which is in the gRPC format: // https://github.com/grpc/grpc/blob/master/doc/naming.md string stackdriver_address = 10; // (optional) The gRPC server that hosts Stackdriver tracing service. Only // Google gRPC is supported. If :ref:`target_uri ` // is not provided, the default production Stackdriver address will be used. api.v2.core.GrpcService stackdriver_grpc_service = 13; // Enables the Zipkin exporter if set to true. The url and service name must // also be set. bool zipkin_exporter_enabled = 5; // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" string zipkin_url = 6; // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or // ocagent_grpc_service must also be set. bool ocagent_exporter_enabled = 11; // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC // format: https://github.com/grpc/grpc/blob/master/doc/naming.md // [#comment:TODO: deprecate this field] string ocagent_address = 12; // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. // This is only used if the ocagent_address is left empty. api.v2.core.GrpcService ocagent_grpc_service = 14; // List of incoming trace context headers we will accept. First one found // wins. repeated TraceContext incoming_trace_context = 8; // List of outgoing trace context headers we will produce. 
repeated TraceContext outgoing_trace_context = 9; } ================================================ FILE: api/envoy/config/trace/v2/service.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2; import "envoy/api/v2/core/grpc_service.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "ServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Trace Service] // Configuration structure. message TraceServiceConfig { // The upstream gRPC cluster that hosts the metrics service. api.v2.core.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/trace/v2/trace.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2; import "udpa/annotations/status.proto"; import public "envoy/config/trace/v2/datadog.proto"; import public "envoy/config/trace/v2/dynamic_ot.proto"; import public "envoy/config/trace/v2/http_tracer.proto"; import public "envoy/config/trace/v2/lightstep.proto"; import public "envoy/config/trace/v2/opencensus.proto"; import public "envoy/config/trace/v2/service.proto"; import public "envoy/config/trace/v2/zipkin.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option java_outer_classname = "TraceProto"; option java_multiple_files = true; ================================================ FILE: api/envoy/config/trace/v2/zipkin.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2"; option 
java_outer_classname = "ZipkinProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Zipkin tracer] // Configuration for the Zipkin tracer. // [#extension: envoy.tracers.zipkin] // [#next-free-field: 6] message ZipkinConfig { // Available Zipkin collector endpoint versions. enum CollectorEndpointVersion { // Zipkin API v1, JSON over HTTP. // [#comment: The default implementation of Zipkin client before this field is added was only v1 // and the way user configure this was by not explicitly specifying the version. Consequently, // before this is added, the corresponding Zipkin collector expected to receive v1 payload. // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, // since in Zipkin realm this v1 version is considered to be not preferable anymore.] HTTP_JSON_V1 = 0 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; // Zipkin API v2, JSON over HTTP. HTTP_JSON = 1; // Zipkin API v2, protobuf over HTTP. HTTP_PROTO = 2; // [#not-implemented-hide:] GRPC = 3; } // The cluster manager cluster that hosts the Zipkin collectors. Note that the // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster // resources `. string collector_cluster = 1 [(validate.rules).string = {min_bytes: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When // using a standard Zipkin installation, the API endpoint is typically // /api/v1/spans, which is the default value. string collector_endpoint = 2 [(validate.rules).string = {min_bytes: 1}]; // Determines whether a 128bit trace id will be used when creating a new // trace instance. The default value is false, which will result in a 64 bit trace id being used. 
bool trace_id_128bit = 3; // Determines whether client and server spans will share the same span context. // The default value is true. google.protobuf.BoolValue shared_span_context = 4; // Determines the selected collector endpoint version. By default, the ``HTTP_JSON_V1`` will be // used. CollectorEndpointVersion collector_endpoint_version = 5; } ================================================ FILE: api/envoy/config/trace/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/trace/v2alpha/xray.proto ================================================ syntax = "proto3"; package envoy.config.trace.v2alpha; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v2alpha"; option java_outer_classname = "XrayProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: AWS X-Ray Tracer Configuration] // Configuration for AWS X-Ray tracer message XRayConfig { // The UDP endpoint of the X-Ray Daemon where the spans will be sent. // If this value is not set, the default value of 127.0.0.1:2000 will be used. api.v2.core.SocketAddress daemon_endpoint = 1; // The name of the X-Ray segment. string segment_name = 2 [(validate.rules).string = {min_len: 1}]; // The location of a local custom sampling rules JSON file. 
// For an example of the sampling rules see: // `X-Ray SDK documentation // `_ api.v2.core.DataSource sampling_rule_manifest = 3; } ================================================ FILE: api/envoy/config/trace/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/trace/v2:pkg", "//envoy/config/trace/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", ], ) ================================================ FILE: api/envoy/config/trace/v3/datadog.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "DatadogProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.datadog.v4alpha"; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Datadog tracer] // Configuration for the Datadog tracer. // [#extension: envoy.tracers.datadog] message DatadogConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.DatadogConfig"; // The cluster to use for submitting traces to the Datadog agent. string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The name used for the service when traces are generated by envoy. 
string service_name = 2 [(validate.rules).string = {min_len: 1}]; } ================================================ FILE: api/envoy/config/trace/v3/dynamic_ot.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "DynamicOtProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.dynamic_ot.v4alpha"; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamically loadable OpenTracing tracer] // DynamicOtConfig is used to dynamically load a tracer from a shared library // that implements the `OpenTracing dynamic loading API // `_. // [#extension: envoy.tracers.dynamic_ot] message DynamicOtConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.DynamicOtConfig"; // Dynamic library implementing the `OpenTracing API // `_. string library = 1 [(validate.rules).string = {min_len: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. 
google.protobuf.Struct config = 2; } ================================================ FILE: api/envoy/config/trace/v3/http_tracer.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "HttpTracerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tracing] // Tracing :ref:`architecture overview `. // The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. // // Envoy may support other tracers in the future, but right now the HTTP tracer is the only one // supported. // // .. attention:: // // Use of this message type has been deprecated in favor of direct use of // :ref:`Tracing.Http `. message Tracing { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.Tracing"; // Configuration for an HTTP tracer provider used by Envoy. // // The configuration is defined by the // :ref:`HttpConnectionManager.Tracing ` // :ref:`provider ` // field. message Http { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.Tracing.Http"; reserved 2; reserved "config"; // The name of the HTTP trace driver to instantiate. The name must match a // supported HTTP trace driver. Built-in trace drivers: // // - *envoy.tracers.lightstep* // - *envoy.tracers.zipkin* // - *envoy.tracers.dynamic_ot* // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* // - *envoy.tracers.xray* string name = 1 [(validate.rules).string = {min_len: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. 
// See the trace drivers for examples: // // - :ref:`LightstepConfig ` // - :ref:`ZipkinConfig ` // - :ref:`DynamicOtConfig ` // - :ref:`DatadogConfig ` // - :ref:`OpenCensusConfig ` // - :ref:`AWS X-Ray ` oneof config_type { google.protobuf.Any typed_config = 3; } } // Provides configuration for the HTTP tracer. Http http = 1; } ================================================ FILE: api/envoy/config/trace/v3/lightstep.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "LightstepProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.lightstep.v4alpha"; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: LightStep tracer] // Configuration for the LightStep tracer. // [#extension: envoy.tracers.lightstep] message LightstepConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.LightstepConfig"; // Available propagation modes enum PropagationMode { // Propagate trace context in the single header x-ot-span-context. ENVOY = 0; // Propagate trace context using LightStep's native format. LIGHTSTEP = 1; // Propagate trace context using the b3 format. B3 = 2; // Propagate trace context using the w3c trace-context standard. TRACE_CONTEXT = 3; } // The cluster manager cluster that hosts the LightStep collectors. string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // File containing the access token to the `LightStep // `_ API. string access_token_file = 2 [(validate.rules).string = {min_len: 1}]; // Propagation modes to use by LightStep's tracer. 
repeated PropagationMode propagation_modes = 3 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; } ================================================ FILE: api/envoy/config/trace/v3/opencensus.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "envoy/config/core/v3/grpc_service.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "OpencensusProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.opencensus.v4alpha"; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: OpenCensus tracer] // Configuration for the OpenCensus tracer. // [#next-free-field: 15] // [#extension: envoy.tracers.opencensus] message OpenCensusConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.OpenCensusConfig"; enum TraceContext { // No-op default, no trace context is utilized. NONE = 0; // W3C Trace-Context format "traceparent:" header. TRACE_CONTEXT = 1; // Binary "grpc-trace-bin:" header. GRPC_TRACE_BIN = 2; // "X-Cloud-Trace-Context:" header. CLOUD_TRACE_CONTEXT = 3; // X-B3-* headers. B3 = 4; } reserved 7; // Configures tracing, e.g. the sampler, max number of annotations, etc. opencensus.proto.trace.v1.TraceConfig trace_config = 1; // Enables the stdout exporter if set to true. This is intended for debugging // purposes. bool stdout_exporter_enabled = 2; // Enables the Stackdriver exporter if set to true. The project_id must also // be set. bool stackdriver_exporter_enabled = 3; // The Cloud project_id to use for Stackdriver tracing. 
string stackdriver_project_id = 4; // (optional) By default, the Stackdriver exporter will connect to production // Stackdriver. If stackdriver_address is non-empty, it will instead connect // to this address, which is in the gRPC format: // https://github.com/grpc/grpc/blob/master/doc/naming.md string stackdriver_address = 10; // (optional) The gRPC server that hosts Stackdriver tracing service. Only // Google gRPC is supported. If :ref:`target_uri ` // is not provided, the default production Stackdriver address will be used. core.v3.GrpcService stackdriver_grpc_service = 13; // Enables the Zipkin exporter if set to true. The url and service name must // also be set. bool zipkin_exporter_enabled = 5; // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" string zipkin_url = 6; // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or // ocagent_grpc_service must also be set. bool ocagent_exporter_enabled = 11; // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC // format: https://github.com/grpc/grpc/blob/master/doc/naming.md // [#comment:TODO: deprecate this field] string ocagent_address = 12; // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. // This is only used if the ocagent_address is left empty. core.v3.GrpcService ocagent_grpc_service = 14; // List of incoming trace context headers we will accept. First one found // wins. repeated TraceContext incoming_trace_context = 8; // List of outgoing trace context headers we will produce. 
repeated TraceContext outgoing_trace_context = 9; } ================================================ FILE: api/envoy/config/trace/v3/service.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "envoy/config/core/v3/grpc_service.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "ServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Trace Service] // Configuration structure. message TraceServiceConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.TraceServiceConfig"; // The upstream gRPC cluster that hosts the metrics service. core.v3.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/trace/v3/trace.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "udpa/annotations/status.proto"; import public "envoy/config/trace/v3/datadog.proto"; import public "envoy/config/trace/v3/dynamic_ot.proto"; import public "envoy/config/trace/v3/http_tracer.proto"; import public "envoy/config/trace/v3/lightstep.proto"; import public "envoy/config/trace/v3/opencensus.proto"; import public "envoy/config/trace/v3/service.proto"; import public "envoy/config/trace/v3/zipkin.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "TraceProto"; option java_multiple_files = true; ================================================ FILE: api/envoy/config/trace/v3/xray.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "envoy/config/core/v3/address.proto"; import 
"envoy/config/core/v3/base.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "XrayProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.xray.v4alpha"; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AWS X-Ray Tracer Configuration] // Configuration for AWS X-Ray tracer message XRayConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2alpha.XRayConfig"; message SegmentFields { // The type of AWS resource, e.g. "AWS::AppMesh::Proxy". string origin = 1; // AWS resource metadata dictionary. // See: `X-Ray Segment Document documentation `__ google.protobuf.Struct aws = 2; } // The UDP endpoint of the X-Ray Daemon where the spans will be sent. // If this value is not set, the default value of 127.0.0.1:2000 will be used. core.v3.SocketAddress daemon_endpoint = 1; // The name of the X-Ray segment. string segment_name = 2 [(validate.rules).string = {min_len: 1}]; // The location of a local custom sampling rules JSON file. // For an example of the sampling rules see: // `X-Ray SDK documentation // `_ core.v3.DataSource sampling_rule_manifest = 3; // Optional custom fields to be added to each trace segment. 
// see: `X-Ray Segment Document documentation // `__ SegmentFields segment_fields = 4; } ================================================ FILE: api/envoy/config/trace/v3/zipkin.proto ================================================ syntax = "proto3"; package envoy.config.trace.v3; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v3"; option java_outer_classname = "ZipkinProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.tracers.zipkin.v4alpha"; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Zipkin tracer] // Configuration for the Zipkin tracer. // [#extension: envoy.tracers.zipkin] // [#next-free-field: 6] message ZipkinConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v2.ZipkinConfig"; // Available Zipkin collector endpoint versions. enum CollectorEndpointVersion { // Zipkin API v1, JSON over HTTP. // [#comment: The default implementation of Zipkin client before this field is added was only v1 // and the way user configure this was by not explicitly specifying the version. Consequently, // before this is added, the corresponding Zipkin collector expected to receive v1 payload. // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, // since in Zipkin realm this v1 version is considered to be not preferable anymore.] DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; // Zipkin API v2, JSON over HTTP. HTTP_JSON = 1; // Zipkin API v2, protobuf over HTTP. 
HTTP_PROTO = 2; // [#not-implemented-hide:] GRPC = 3; } // The cluster manager cluster that hosts the Zipkin collectors. Note that the // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster // resources `. string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When // using a standard Zipkin installation, the API endpoint is typically // /api/v1/spans, which is the default value. string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Determines whether a 128bit trace id will be used when creating a new // trace instance. The default value is false, which will result in a 64 bit trace id being used. bool trace_id_128bit = 3; // Determines whether client and server spans will share the same span context. // The default value is true. google.protobuf.BoolValue shared_span_context = 4; // Determines the selected collector endpoint version. By default, the // ``DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE`` (Zipkin v1, JSON over HTTP) will be used. CollectorEndpointVersion collector_endpoint_version = 5; } ================================================ FILE: api/envoy/config/trace/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/config/trace/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/trace/v4alpha/http_tracer.proto ================================================ syntax = "proto3"; package envoy.config.trace.v4alpha; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; option java_outer_classname = "HttpTracerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Tracing] // Tracing :ref:`architecture overview `. // The tracing configuration specifies settings for an HTTP tracer provider used by Envoy. // // Envoy may support other tracers in the future, but right now the HTTP tracer is the only one // supported. // // .. attention:: // // Use of this message type has been deprecated in favor of direct use of // :ref:`Tracing.Http `. message Tracing { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.Tracing"; // Configuration for an HTTP tracer provider used by Envoy. // // The configuration is defined by the // :ref:`HttpConnectionManager.Tracing ` // :ref:`provider ` // field. message Http { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.Tracing.Http"; reserved 2; reserved "config"; // The name of the HTTP trace driver to instantiate. The name must match a // supported HTTP trace driver. 
Built-in trace drivers: // // - *envoy.tracers.lightstep* // - *envoy.tracers.zipkin* // - *envoy.tracers.dynamic_ot* // - *envoy.tracers.datadog* // - *envoy.tracers.opencensus* // - *envoy.tracers.xray* string name = 1 [(validate.rules).string = {min_len: 1}]; // Trace driver specific configuration which depends on the driver being instantiated. // See the trace drivers for examples: // // - :ref:`LightstepConfig ` // - :ref:`ZipkinConfig ` // - :ref:`DynamicOtConfig ` // - :ref:`DatadogConfig ` // - :ref:`OpenCensusConfig ` // - :ref:`AWS X-Ray ` oneof config_type { google.protobuf.Any typed_config = 3; } } // Provides configuration for the HTTP tracer. Http http = 1; } ================================================ FILE: api/envoy/config/trace/v4alpha/service.proto ================================================ syntax = "proto3"; package envoy.config.trace.v4alpha; import "envoy/config/core/v4alpha/grpc_service.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.trace.v4alpha"; option java_outer_classname = "ServiceProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Trace Service] // Configuration structure. message TraceServiceConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.TraceServiceConfig"; // The upstream gRPC cluster that hosts the metrics service. core.v4alpha.GrpcService grpc_service = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/config/transport_socket/alts/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/transport_socket/alts/v2alpha/alts.proto ================================================ syntax = "proto3"; package envoy.config.transport_socket.alts.v2alpha; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.transport_socket.alts.v2alpha"; option java_outer_classname = "AltsProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.alts.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: ALTS] // [#extension: envoy.transport_sockets.alts] // Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy. // https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ message Alts { // The location of a handshaker service, this is usually 169.254.169.254:8080 // on GCE. string handshaker_service = 1 [(validate.rules).string = {min_bytes: 1}]; // The acceptable service accounts from peer, peers not in the list will be rejected in the // handshake validation step. If empty, no validation will be performed. repeated string peer_service_accounts = 2; } ================================================ FILE: api/envoy/config/transport_socket/raw_buffer/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/config/transport_socket/raw_buffer/v2/raw_buffer.proto ================================================ syntax = "proto3"; package envoy.config.transport_socket.raw_buffer.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.config.transport_socket.raw_buffer.v2"; option java_outer_classname = "RawBufferProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.raw_buffer.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Raw Buffer] // [#extension: envoy.transport_sockets.raw_buffer] // Configuration for raw buffer transport socket. message RawBuffer { } ================================================ FILE: api/envoy/config/transport_socket/tap/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "//envoy/config/common/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/config/transport_socket/tap/v2alpha/tap.proto ================================================ syntax = "proto3"; package envoy.config.transport_socket.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/config/common/tap/v2alpha/common.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.config.transport_socket.tap.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.extensions.transport_sockets.tap.v3"; option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap] // [#extension: envoy.transport_sockets.tap] // Configuration for tap transport socket. This wraps another transport socket, providing the // ability to interpose and record in plain text any traffic that is surfaced to Envoy. message Tap { // Common configuration for the tap transport socket. common.tap.v2alpha.CommonExtensionConfig common_config = 1 [(validate.rules).message = {required: true}]; // The underlying transport socket being wrapped. api.v2.core.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/data/accesslog/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/data/accesslog/v2/accesslog.proto ================================================ syntax = "proto3"; package envoy.data.accesslog.v2; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.accesslog.v2"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: gRPC access logs] // Envoy access logs describe incoming interaction with Envoy over a fixed // period of time, and typically cover a single request/response exchange, // (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP). // Access logs contain fields defined in protocol-specific protobuf messages. // // Except where explicitly declared otherwise, all fields describe // *downstream* interaction between Envoy and a connected client. // Fields describing *upstream* interaction will explicitly include ``upstream`` // in their name. message TCPAccessLogEntry { // Common properties shared by all Envoy access logs. AccessLogCommon common_properties = 1; // Properties of the TCP connection. ConnectionProperties connection_properties = 2; } message HTTPAccessLogEntry { // HTTP version enum HTTPVersion { PROTOCOL_UNSPECIFIED = 0; HTTP10 = 1; HTTP11 = 2; HTTP2 = 3; HTTP3 = 4; } // Common properties shared by all Envoy access logs. 
AccessLogCommon common_properties = 1; HTTPVersion protocol_version = 2; // Description of the incoming HTTP request. HTTPRequestProperties request = 3; // Description of the outgoing HTTP response. HTTPResponseProperties response = 4; } // Defines fields for a connection message ConnectionProperties { // Number of bytes received from downstream. uint64 received_bytes = 1; // Number of bytes sent to downstream. uint64 sent_bytes = 2; } // Defines fields that are shared by all Envoy access logs. // [#next-free-field: 22] message AccessLogCommon { // [#not-implemented-hide:] // This field indicates the rate at which this log entry was sampled. // Valid range is (0.0, 1.0]. double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; // This field is the remote/origin address on which the request from the user was received. // Note: This may not be the physical peer. E.g., if the remote address is inferred from for // example the x-forwarded-for header, proxy protocol, etc. api.v2.core.Address downstream_remote_address = 2; // This field is the local/destination address on which the request from the user was received. api.v2.core.Address downstream_local_address = 3; // If the connection is secure, this field will contain TLS properties. TLSProperties tls_properties = 4; // The time that Envoy started servicing this request. This is effectively the time that the first // downstream byte is received. google.protobuf.Timestamp start_time = 5; // Interval between the first downstream byte received and the last // downstream byte received (i.e. time it takes to receive a request). google.protobuf.Duration time_to_last_rx_byte = 6; // Interval between the first downstream byte received and the first upstream byte sent. There may // be considerable delta between *time_to_last_rx_byte* and this value due to filters. // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about // not accounting for kernel socket buffer time, etc.
google.protobuf.Duration time_to_first_upstream_tx_byte = 7; // Interval between the first downstream byte received and the last upstream byte sent. There may // by considerable delta between *time_to_last_rx_byte* and this value due to filters. // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about // not accounting for kernel socket buffer time, etc. google.protobuf.Duration time_to_last_upstream_tx_byte = 8; // Interval between the first downstream byte received and the first upstream // byte received (i.e. time it takes to start receiving a response). google.protobuf.Duration time_to_first_upstream_rx_byte = 9; // Interval between the first downstream byte received and the last upstream // byte received (i.e. time it takes to receive a complete response). google.protobuf.Duration time_to_last_upstream_rx_byte = 10; // Interval between the first downstream byte received and the first downstream byte sent. // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field // due to filters. Additionally, the same caveats apply as documented in // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc. google.protobuf.Duration time_to_first_downstream_tx_byte = 11; // Interval between the first downstream byte received and the last downstream byte sent. // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate // time. In the current implementation it does not include kernel socket buffer time. In the // current implementation it also does not include send window buffering inside the HTTP/2 codec. // In the future it is likely that work will be done to make this duration more accurate. google.protobuf.Duration time_to_last_downstream_tx_byte = 12; // The upstream remote/destination address that handles this exchange. 
This does not include // retries. api.v2.core.Address upstream_remote_address = 13; // The upstream local/origin address that handles this exchange. This does not include retries. api.v2.core.Address upstream_local_address = 14; // The upstream cluster that *upstream_remote_address* belongs to. string upstream_cluster = 15; // Flags indicating occurrences during request/response processing. ResponseFlags response_flags = 16; // All metadata encountered during request processing, including endpoint // selection. // // This can be used to associate IDs attached to the various configurations // used to process this request with the access log entry. For example, a // route created from a higher level forwarding rule with some ID can place // that ID in this field and cross reference later. It can also be used to // determine if a canary endpoint was used or not. api.v2.core.Metadata metadata = 17; // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the // failure reason from the transport socket. The format of this field depends on the configured // upstream transport socket. Common TLS failures are in // :ref:`TLS trouble shooting `. string upstream_transport_failure_reason = 18; // The name of the route string route_name = 19; // This field is the downstream direct remote address on which the request from the user was // received. Note: This is always the physical peer, even if the remote address is inferred from // for example the x-forwarder-for header, proxy protocol, etc. api.v2.core.Address downstream_direct_remote_address = 20; // Map of filter state in stream info that have been configured to be logged. If the filter // state serialized to any message other than `google.protobuf.Any` it will be packed into // `google.protobuf.Any`. map filter_state_objects = 21; } // Flags indicating occurrences during request/response processing. 
// [#next-free-field: 20] message ResponseFlags { message Unauthorized { // Reasons why the request was unauthorized enum Reason { REASON_UNSPECIFIED = 0; // The request was denied by the external authorization service. EXTERNAL_SERVICE = 1; } Reason reason = 1; } // Indicates local server healthcheck failed. bool failed_local_healthcheck = 1; // Indicates there was no healthy upstream. bool no_healthy_upstream = 2; // Indicates there was an upstream request timeout. bool upstream_request_timeout = 3; // Indicates local codec level reset was sent on the stream. bool local_reset = 4; // Indicates remote codec level reset was received on the stream. bool upstream_remote_reset = 5; // Indicates there was a local reset by a connection pool due to an initial connection failure. bool upstream_connection_failure = 6; // Indicates the stream was reset due to an upstream connection termination. bool upstream_connection_termination = 7; // Indicates the stream was reset because of a resource overflow. bool upstream_overflow = 8; // Indicates no route was found for the request. bool no_route_found = 9; // Indicates that the request was delayed before proxying. bool delay_injected = 10; // Indicates that the request was aborted with an injected error code. bool fault_injected = 11; // Indicates that the request was rate-limited locally. bool rate_limited = 12; // Indicates if the request was deemed unauthorized and the reason for it. Unauthorized unauthorized_details = 13; // Indicates that the request was rejected because there was an error in rate limit service. bool rate_limit_service_error = 14; // Indicates the stream was reset due to a downstream connection termination. bool downstream_connection_termination = 15; // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. bool upstream_retry_limit_exceeded = 16; // Indicates that the stream idle timeout was hit, resulting in a downstream 408.
bool stream_idle_timeout = 17; // Indicates that the request was rejected because an envoy request header failed strict // validation. bool invalid_envoy_request_headers = 18; // Indicates there was an HTTP protocol error on the downstream request. bool downstream_protocol_error = 19; } // Properties of a negotiated TLS connection. // [#next-free-field: 7] message TLSProperties { enum TLSVersion { VERSION_UNSPECIFIED = 0; TLSv1 = 1; TLSv1_1 = 2; TLSv1_2 = 3; TLSv1_3 = 4; } message CertificateProperties { message SubjectAltName { oneof san { string uri = 1; // [#not-implemented-hide:] string dns = 2; } } // SANs present in the certificate. repeated SubjectAltName subject_alt_name = 1; // The subject field of the certificate. string subject = 2; } // Version of TLS that was negotiated. TLSVersion tls_version = 1; // TLS cipher suite negotiated during handshake. The value is a // four-digit hex code defined by the IANA TLS Cipher Suite Registry // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). // // Here it is expressed as an integer. google.protobuf.UInt32Value tls_cipher_suite = 2; // SNI hostname from handshake. string tls_sni_hostname = 3; // Properties of the local certificate used to negotiate TLS. CertificateProperties local_certificate_properties = 4; // Properties of the peer certificate used to negotiate TLS. CertificateProperties peer_certificate_properties = 5; // The TLS session ID. string tls_session_id = 6; } // [#next-free-field: 14] message HTTPRequestProperties { // The request method (RFC 7231/2616). api.v2.core.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}]; // The scheme portion of the incoming request URI. string scheme = 2; // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. string authority = 3; // The port of the incoming request URI // (unused currently, as port is composed onto authority). google.protobuf.UInt32Value port = 4; // The path portion from the incoming request URI. 
string path = 5; // Value of the ``User-Agent`` request header. string user_agent = 6; // Value of the ``Referer`` request header. string referer = 7; // Value of the ``X-Forwarded-For`` request header. string forwarded_for = 8; // Value of the ``X-Request-Id`` request header // // This header is used by Envoy to uniquely identify a request. // It will be generated for all external requests and internal requests that // do not already have a request ID. string request_id = 9; // Value of the ``X-Envoy-Original-Path`` request header. string original_path = 10; // Size of the HTTP request headers in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. it does not // include overhead from framing or encoding at other networking layers. uint64 request_headers_bytes = 11; // Size of the HTTP request body in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. it does not // include overhead from framing or encoding at other networking layers. uint64 request_body_bytes = 12; // Map of additional headers that have been configured to be logged. map request_headers = 13; } // [#next-free-field: 7] message HTTPResponseProperties { // The HTTP response code returned by Envoy. google.protobuf.UInt32Value response_code = 1; // Size of the HTTP response headers in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. it does not // include overhead from framing or encoding at other networking layers. uint64 response_headers_bytes = 2; // Size of the HTTP response body in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. it does not // include overhead from framing or encoding at other networking layers. uint64 response_body_bytes = 3; // Map of additional headers configured to be logged. map response_headers = 4; // Map of trailers configured to be logged. map response_trailers = 5; // The HTTP response code details. 
string response_code_details = 6; } ================================================ FILE: api/envoy/data/accesslog/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/data/accesslog/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/data/accesslog/v3/accesslog.proto ================================================ syntax = "proto3"; package envoy.data.accesslog.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.accesslog.v3"; option java_outer_classname = "AccesslogProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC access logs] // Envoy access logs describe incoming interaction with Envoy over a fixed // period of time, and typically cover a single request/response exchange, // (e.g. HTTP), stream (e.g. over HTTP/gRPC), or proxied connection (e.g. TCP). // Access logs contain fields defined in protocol-specific protobuf messages. // // Except where explicitly declared otherwise, all fields describe // *downstream* interaction between Envoy and a connected client. // Fields describing *upstream* interaction will explicitly include ``upstream`` // in their name. 
message TCPAccessLogEntry { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.TCPAccessLogEntry"; // Common properties shared by all Envoy access logs. AccessLogCommon common_properties = 1; // Properties of the TCP connection. ConnectionProperties connection_properties = 2; } message HTTPAccessLogEntry { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.HTTPAccessLogEntry"; // HTTP version enum HTTPVersion { PROTOCOL_UNSPECIFIED = 0; HTTP10 = 1; HTTP11 = 2; HTTP2 = 3; HTTP3 = 4; } // Common properties shared by all Envoy access logs. AccessLogCommon common_properties = 1; HTTPVersion protocol_version = 2; // Description of the incoming HTTP request. HTTPRequestProperties request = 3; // Description of the outgoing HTTP response. HTTPResponseProperties response = 4; } // Defines fields for a connection message ConnectionProperties { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ConnectionProperties"; // Number of bytes received from downstream. uint64 received_bytes = 1; // Number of bytes sent to downstream. uint64 sent_bytes = 2; } // Defines fields that are shared by all Envoy access logs. // [#next-free-field: 22] message AccessLogCommon { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.AccessLogCommon"; // [#not-implemented-hide:] // This field indicates the rate at which this log entry was sampled. // Valid range is (0.0, 1.0]. double sample_rate = 1 [(validate.rules).double = {lte: 1.0 gt: 0.0}]; // This field is the remote/origin address on which the request from the user was received. // Note: This may not be the physical peer. E.g, if the remote address is inferred from for // example the x-forwarder-for header, proxy protocol, etc. config.core.v3.Address downstream_remote_address = 2; // This field is the local/destination address on which the request from the user was received. 
config.core.v3.Address downstream_local_address = 3; // If the connection is secure, this field will contain TLS properties. TLSProperties tls_properties = 4; // The time that Envoy started servicing this request. This is effectively the time that the first // downstream byte is received. google.protobuf.Timestamp start_time = 5; // Interval between the first downstream byte received and the last // downstream byte received (i.e. time it takes to receive a request). google.protobuf.Duration time_to_last_rx_byte = 6; // Interval between the first downstream byte received and the first upstream byte sent. There may // be considerable delta between *time_to_last_rx_byte* and this value due to filters. // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about // not accounting for kernel socket buffer time, etc. google.protobuf.Duration time_to_first_upstream_tx_byte = 7; // Interval between the first downstream byte received and the last upstream byte sent. There may // be considerable delta between *time_to_last_rx_byte* and this value due to filters. // Additionally, the same caveats apply as documented in *time_to_last_downstream_tx_byte* about // not accounting for kernel socket buffer time, etc. google.protobuf.Duration time_to_last_upstream_tx_byte = 8; // Interval between the first downstream byte received and the first upstream // byte received (i.e. time it takes to start receiving a response). google.protobuf.Duration time_to_first_upstream_rx_byte = 9; // Interval between the first downstream byte received and the last upstream // byte received (i.e. time it takes to receive a complete response). google.protobuf.Duration time_to_last_upstream_rx_byte = 10; // Interval between the first downstream byte received and the first downstream byte sent. // There may be a considerable delta between the *time_to_first_upstream_rx_byte* and this field // due to filters.
Additionally, the same caveats apply as documented in // *time_to_last_downstream_tx_byte* about not accounting for kernel socket buffer time, etc. google.protobuf.Duration time_to_first_downstream_tx_byte = 11; // Interval between the first downstream byte received and the last downstream byte sent. // Depending on protocol, buffering, windowing, filters, etc. there may be a considerable delta // between *time_to_last_upstream_rx_byte* and this field. Note also that this is an approximate // time. In the current implementation it does not include kernel socket buffer time. In the // current implementation it also does not include send window buffering inside the HTTP/2 codec. // In the future it is likely that work will be done to make this duration more accurate. google.protobuf.Duration time_to_last_downstream_tx_byte = 12; // The upstream remote/destination address that handles this exchange. This does not include // retries. config.core.v3.Address upstream_remote_address = 13; // The upstream local/origin address that handles this exchange. This does not include retries. config.core.v3.Address upstream_local_address = 14; // The upstream cluster that *upstream_remote_address* belongs to. string upstream_cluster = 15; // Flags indicating occurrences during request/response processing. ResponseFlags response_flags = 16; // All metadata encountered during request processing, including endpoint // selection. // // This can be used to associate IDs attached to the various configurations // used to process this request with the access log entry. For example, a // route created from a higher level forwarding rule with some ID can place // that ID in this field and cross reference later. It can also be used to // determine if a canary endpoint was used or not. config.core.v3.Metadata metadata = 17; // If upstream connection failed due to transport socket (e.g. TLS handshake), provides the // failure reason from the transport socket. 
The format of this field depends on the configured // upstream transport socket. Common TLS failures are in // :ref:`TLS trouble shooting `. string upstream_transport_failure_reason = 18; // The name of the route string route_name = 19; // This field is the downstream direct remote address on which the request from the user was // received. Note: This is always the physical peer, even if the remote address is inferred from // for example the x-forwarded-for header, proxy protocol, etc. config.core.v3.Address downstream_direct_remote_address = 20; // Map of filter state in stream info that have been configured to be logged. If the filter // state serialized to any message other than `google.protobuf.Any` it will be packed into // `google.protobuf.Any`. map filter_state_objects = 21; } // Flags indicating occurrences during request/response processing. // [#next-free-field: 24] message ResponseFlags { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags"; message Unauthorized { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.ResponseFlags.Unauthorized"; // Reasons why the request was unauthorized enum Reason { REASON_UNSPECIFIED = 0; // The request was denied by the external authorization service. EXTERNAL_SERVICE = 1; } Reason reason = 1; } // Indicates local server healthcheck failed. bool failed_local_healthcheck = 1; // Indicates there was no healthy upstream. bool no_healthy_upstream = 2; // Indicates there was an upstream request timeout. bool upstream_request_timeout = 3; // Indicates local codec level reset was sent on the stream. bool local_reset = 4; // Indicates remote codec level reset was received on the stream. bool upstream_remote_reset = 5; // Indicates there was a local reset by a connection pool due to an initial connection failure. bool upstream_connection_failure = 6; // Indicates the stream was reset due to an upstream connection termination.
bool upstream_connection_termination = 7; // Indicates the stream was reset because of a resource overflow. bool upstream_overflow = 8; // Indicates no route was found for the request. bool no_route_found = 9; // Indicates that the request was delayed before proxying. bool delay_injected = 10; // Indicates that the request was aborted with an injected error code. bool fault_injected = 11; // Indicates that the request was rate-limited locally. bool rate_limited = 12; // Indicates if the request was deemed unauthorized and the reason for it. Unauthorized unauthorized_details = 13; // Indicates that the request was rejected because there was an error in rate limit service. bool rate_limit_service_error = 14; // Indicates the stream was reset due to a downstream connection termination. bool downstream_connection_termination = 15; // Indicates that the upstream retry limit was exceeded, resulting in a downstream error. bool upstream_retry_limit_exceeded = 16; // Indicates that the stream idle timeout was hit, resulting in a downstream 408. bool stream_idle_timeout = 17; // Indicates that the request was rejected because an envoy request header failed strict // validation. bool invalid_envoy_request_headers = 18; // Indicates there was an HTTP protocol error on the downstream request. bool downstream_protocol_error = 19; // Indicates there was a max stream duration reached on the upstream request. bool upstream_max_stream_duration_reached = 20; // Indicates the response was served from a cache filter. bool response_from_cache_filter = 21; // Indicates that a filter configuration is not available. bool no_filter_config_found = 22; // Indicates that request or connection exceeded the downstream connection duration. bool duration_timeout = 23; } // Properties of a negotiated TLS connection. 
// [#next-free-field: 7] message TLSProperties { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.TLSProperties"; enum TLSVersion { VERSION_UNSPECIFIED = 0; TLSv1 = 1; TLSv1_1 = 2; TLSv1_2 = 3; TLSv1_3 = 4; } message CertificateProperties { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.TLSProperties.CertificateProperties"; message SubjectAltName { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.TLSProperties.CertificateProperties.SubjectAltName"; oneof san { string uri = 1; // [#not-implemented-hide:] string dns = 2; } } // SANs present in the certificate. repeated SubjectAltName subject_alt_name = 1; // The subject field of the certificate. string subject = 2; } // Version of TLS that was negotiated. TLSVersion tls_version = 1; // TLS cipher suite negotiated during handshake. The value is a // four-digit hex code defined by the IANA TLS Cipher Suite Registry // (e.g. ``009C`` for ``TLS_RSA_WITH_AES_128_GCM_SHA256``). // // Here it is expressed as an integer. google.protobuf.UInt32Value tls_cipher_suite = 2; // SNI hostname from handshake. string tls_sni_hostname = 3; // Properties of the local certificate used to negotiate TLS. CertificateProperties local_certificate_properties = 4; // Properties of the peer certificate used to negotiate TLS. CertificateProperties peer_certificate_properties = 5; // The TLS session ID. string tls_session_id = 6; } // [#next-free-field: 14] message HTTPRequestProperties { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.HTTPRequestProperties"; // The request method (RFC 7231/2616). config.core.v3.RequestMethod request_method = 1 [(validate.rules).enum = {defined_only: true}]; // The scheme portion of the incoming request URI. string scheme = 2; // HTTP/2 ``:authority`` or HTTP/1.1 ``Host`` header value. 
string authority = 3; // The port of the incoming request URI // (unused currently, as port is composed onto authority). google.protobuf.UInt32Value port = 4; // The path portion from the incoming request URI. string path = 5; // Value of the ``User-Agent`` request header. string user_agent = 6; // Value of the ``Referer`` request header. string referer = 7; // Value of the ``X-Forwarded-For`` request header. string forwarded_for = 8; // Value of the ``X-Request-Id`` request header // // This header is used by Envoy to uniquely identify a request. // It will be generated for all external requests and internal requests that // do not already have a request ID. string request_id = 9; // Value of the ``X-Envoy-Original-Path`` request header. string original_path = 10; // Size of the HTTP request headers in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. it does not // include overhead from framing or encoding at other networking layers. uint64 request_headers_bytes = 11; // Size of the HTTP request body in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. it does not // include overhead from framing or encoding at other networking layers. uint64 request_body_bytes = 12; // Map of additional headers that have been configured to be logged. map request_headers = 13; } // [#next-free-field: 7] message HTTPResponseProperties { option (udpa.annotations.versioning).previous_message_type = "envoy.data.accesslog.v2.HTTPResponseProperties"; // The HTTP response code returned by Envoy. google.protobuf.UInt32Value response_code = 1; // Size of the HTTP response headers in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. it does not // include overhead from framing or encoding at other networking layers. uint64 response_headers_bytes = 2; // Size of the HTTP response body in bytes. // // This value is captured from the OSI layer 7 perspective, i.e. 
it does not // include overhead from framing or encoding at other networking layers. uint64 response_body_bytes = 3; // Map of additional headers configured to be logged. map response_headers = 4; // Map of trailers configured to be logged. map response_trailers = 5; // The HTTP response code details. string response_code_details = 6; } ================================================ FILE: api/envoy/data/cluster/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/data/cluster/v2alpha/outlier_detection_event.proto ================================================ syntax = "proto3"; package envoy.data.cluster.v2alpha; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.cluster.v2alpha"; option java_outer_classname = "OutlierDetectionEventProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.data.cluster.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. // Type of ejection that took place enum OutlierEjectionType { // In case upstream host returns certain number of consecutive 5xx. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all type of errors are treated as HTTP 5xx errors. // See :ref:`Cluster outlier detection ` documentation for // details. 
CONSECUTIVE_5XX = 0; // In case upstream host returns certain number of consecutive gateway errors CONSECUTIVE_GATEWAY_FAILURE = 1; // Runs over aggregated success rate statistics from every host in cluster // and selects hosts for which ratio of successful replies deviates from other hosts // in the cluster. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all errors (externally and locally generated) are used to calculate success rate // statistics. See :ref:`Cluster outlier detection ` // documentation for details. SUCCESS_RATE = 2; // Consecutive local origin failures: Connection failures, resets, timeouts, etc // This type of ejection happens only when // :ref:`outlier_detection.split_external_local_origin_errors` // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for // details. CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; // Runs over aggregated success rate statistics for local origin failures // for all hosts in the cluster and selects hosts for which success rate deviates from other // hosts in the cluster. This type of ejection happens only when // :ref:`outlier_detection.split_external_local_origin_errors` // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for // details. SUCCESS_RATE_LOCAL_ORIGIN = 4; // Runs over aggregated success rate statistics from every host in cluster and selects hosts for // which ratio of failed replies is above configured value. FAILURE_PERCENTAGE = 5; // Runs over aggregated success rate statistics for local origin failures from every host in // cluster and selects hosts for which ratio of failed replies is above configured value. FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; } // Represents possible action applied to upstream host enum Action { // In case host was excluded from service EJECT = 0; // In case host was brought back into service UNEJECT = 1; } // [#next-free-field: 12] message OutlierDetectionEvent { // In case of eject represents type of ejection that took place. 
OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; // Timestamp for event. google.protobuf.Timestamp timestamp = 2; // The time in seconds since the last action (either an ejection or unejection) took place. google.protobuf.UInt64Value secs_since_last_action = 3; // The :ref:`cluster ` that owns the ejected host. string cluster_name = 4 [(validate.rules).string = {min_bytes: 1}]; // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. string upstream_url = 5 [(validate.rules).string = {min_bytes: 1}]; // The action that took place. Action action = 6 [(validate.rules).enum = {defined_only: true}]; // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and // then re-added). uint32 num_ejections = 7; // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was // ejected. ``false`` means the event was logged but the host was not actually ejected. bool enforced = 8; oneof event { option (validate.required) = true; OutlierEjectSuccessRate eject_success_rate_event = 9; OutlierEjectConsecutive eject_consecutive_event = 10; OutlierEjectFailurePercentage eject_failure_percentage_event = 11; } } message OutlierEjectSuccessRate { // Host’s success rate at the time of the ejection event on a 0-100 range. uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 // range. uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; // Success rate ejection threshold at the time of the ejection event. uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; } message OutlierEjectConsecutive { } message OutlierEjectFailurePercentage { // Host's success rate at the time of the ejection event on a 0-100 range. 
uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; } ================================================ FILE: api/envoy/data/cluster/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/data/cluster/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/data/cluster/v3/outlier_detection_event.proto ================================================ syntax = "proto3"; package envoy.data.cluster.v3; import "google/protobuf/timestamp.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.cluster.v3"; option java_outer_classname = "OutlierDetectionEventProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Outlier detection logging events] // :ref:`Outlier detection logging `. // Type of ejection that took place enum OutlierEjectionType { // In case upstream host returns certain number of consecutive 5xx. // If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all type of errors are treated as HTTP 5xx errors. // See :ref:`Cluster outlier detection ` documentation for // details. CONSECUTIVE_5XX = 0; // In case upstream host returns certain number of consecutive gateway errors CONSECUTIVE_GATEWAY_FAILURE = 1; // Runs over aggregated success rate statistics from every host in cluster // and selects hosts for which ratio of successful replies deviates from other hosts // in the cluster. 
// If // :ref:`outlier_detection.split_external_local_origin_errors` // is *false*, all errors (externally and locally generated) are used to calculate success rate // statistics. See :ref:`Cluster outlier detection ` // documentation for details. SUCCESS_RATE = 2; // Consecutive local origin failures: Connection failures, resets, timeouts, etc // This type of ejection happens only when // :ref:`outlier_detection.split_external_local_origin_errors` // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for // details. CONSECUTIVE_LOCAL_ORIGIN_FAILURE = 3; // Runs over aggregated success rate statistics for local origin failures // for all hosts in the cluster and selects hosts for which success rate deviates from other // hosts in the cluster. This type of ejection happens only when // :ref:`outlier_detection.split_external_local_origin_errors` // is set to *true*. // See :ref:`Cluster outlier detection ` documentation for // details. SUCCESS_RATE_LOCAL_ORIGIN = 4; // Runs over aggregated success rate statistics from every host in cluster and selects hosts for // which ratio of failed replies is above configured value. FAILURE_PERCENTAGE = 5; // Runs over aggregated success rate statistics for local origin failures from every host in // cluster and selects hosts for which ratio of failed replies is above configured value. FAILURE_PERCENTAGE_LOCAL_ORIGIN = 6; } // Represents possible action applied to upstream host enum Action { // In case host was excluded from service EJECT = 0; // In case host was brought back into service UNEJECT = 1; } // [#next-free-field: 12] message OutlierDetectionEvent { option (udpa.annotations.versioning).previous_message_type = "envoy.data.cluster.v2alpha.OutlierDetectionEvent"; // In case of eject represents type of ejection that took place. OutlierEjectionType type = 1 [(validate.rules).enum = {defined_only: true}]; // Timestamp for event. 
google.protobuf.Timestamp timestamp = 2; // The time in seconds since the last action (either an ejection or unejection) took place. google.protobuf.UInt64Value secs_since_last_action = 3; // The :ref:`cluster ` that owns the ejected host. string cluster_name = 4 [(validate.rules).string = {min_len: 1}]; // The URL of the ejected host. E.g., ``tcp://1.2.3.4:80``. string upstream_url = 5 [(validate.rules).string = {min_len: 1}]; // The action that took place. Action action = 6 [(validate.rules).enum = {defined_only: true}]; // If ``action`` is ``eject``, specifies the number of times the host has been ejected (local to // that Envoy and gets reset if the host gets removed from the upstream cluster for any reason and // then re-added). uint32 num_ejections = 7; // If ``action`` is ``eject``, specifies if the ejection was enforced. ``true`` means the host was // ejected. ``false`` means the event was logged but the host was not actually ejected. bool enforced = 8; oneof event { option (validate.required) = true; OutlierEjectSuccessRate eject_success_rate_event = 9; OutlierEjectConsecutive eject_consecutive_event = 10; OutlierEjectFailurePercentage eject_failure_percentage_event = 11; } } message OutlierEjectSuccessRate { option (udpa.annotations.versioning).previous_message_type = "envoy.data.cluster.v2alpha.OutlierEjectSuccessRate"; // Host’s success rate at the time of the ejection event on a 0-100 range. uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; // Average success rate of the hosts in the cluster at the time of the ejection event on a 0-100 // range. uint32 cluster_average_success_rate = 2 [(validate.rules).uint32 = {lte: 100}]; // Success rate ejection threshold at the time of the ejection event. 
uint32 cluster_success_rate_ejection_threshold = 3 [(validate.rules).uint32 = {lte: 100}]; } message OutlierEjectConsecutive { option (udpa.annotations.versioning).previous_message_type = "envoy.data.cluster.v2alpha.OutlierEjectConsecutive"; } message OutlierEjectFailurePercentage { option (udpa.annotations.versioning).previous_message_type = "envoy.data.cluster.v2alpha.OutlierEjectFailurePercentage"; // Host's success rate at the time of the ejection event on a 0-100 range. uint32 host_success_rate = 1 [(validate.rules).uint32 = {lte: 100}]; } ================================================ FILE: api/envoy/data/core/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/data/core/v2alpha/health_check_event.proto ================================================ syntax = "proto3"; package envoy.data.core.v2alpha; import "envoy/api/v2/core/address.proto"; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.core.v2alpha"; option java_outer_classname = "HealthCheckEventProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. 
enum HealthCheckFailureType { ACTIVE = 0; PASSIVE = 1; NETWORK = 2; } enum HealthCheckerType { HTTP = 0; TCP = 1; GRPC = 2; REDIS = 3; } // [#next-free-field: 10] message HealthCheckEvent { HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; api.v2.core.Address host = 2; string cluster_name = 3 [(validate.rules).string = {min_bytes: 1}]; oneof event { option (validate.required) = true; // Host ejection. HealthCheckEjectUnhealthy eject_unhealthy_event = 4; // Host addition. HealthCheckAddHealthy add_healthy_event = 5; // Host failure. HealthCheckFailure health_check_failure_event = 7; // Healthy host became degraded. DegradedHealthyHost degraded_healthy_host = 8; // A degraded host returned to being healthy. NoLongerDegradedHost no_longer_degraded_host = 9; } // Timestamp for event. google.protobuf.Timestamp timestamp = 6; } message HealthCheckEjectUnhealthy { // The type of failure that caused this ejection. HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; } message HealthCheckAddHealthy { // Whether this addition is the result of the first ever health check on a host, in which case // the configured :ref:`healthy threshold ` // is bypassed and the host is immediately added. bool first_check = 1; } message HealthCheckFailure { // The type of failure that caused this event. HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; // Whether this event is the result of the first ever health check on a host. bool first_check = 2; } message DegradedHealthyHost { } message NoLongerDegradedHost { } ================================================ FILE: api/envoy/data/core/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/data/core/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/data/core/v3/health_check_event.proto ================================================ syntax = "proto3"; package envoy.data.core.v3; import "envoy/config/core/v3/address.proto"; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.core.v3"; option java_outer_classname = "HealthCheckEventProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health check logging events] // :ref:`Health check logging `. enum HealthCheckFailureType { ACTIVE = 0; PASSIVE = 1; NETWORK = 2; } enum HealthCheckerType { HTTP = 0; TCP = 1; GRPC = 2; REDIS = 3; } // [#next-free-field: 10] message HealthCheckEvent { option (udpa.annotations.versioning).previous_message_type = "envoy.data.core.v2alpha.HealthCheckEvent"; HealthCheckerType health_checker_type = 1 [(validate.rules).enum = {defined_only: true}]; config.core.v3.Address host = 2; string cluster_name = 3 [(validate.rules).string = {min_len: 1}]; oneof event { option (validate.required) = true; // Host ejection. HealthCheckEjectUnhealthy eject_unhealthy_event = 4; // Host addition. HealthCheckAddHealthy add_healthy_event = 5; // Host failure. HealthCheckFailure health_check_failure_event = 7; // Healthy host became degraded. DegradedHealthyHost degraded_healthy_host = 8; // A degraded host returned to being healthy. NoLongerDegradedHost no_longer_degraded_host = 9; } // Timestamp for event. 
google.protobuf.Timestamp timestamp = 6; } message HealthCheckEjectUnhealthy { option (udpa.annotations.versioning).previous_message_type = "envoy.data.core.v2alpha.HealthCheckEjectUnhealthy"; // The type of failure that caused this ejection. HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; } message HealthCheckAddHealthy { option (udpa.annotations.versioning).previous_message_type = "envoy.data.core.v2alpha.HealthCheckAddHealthy"; // Whether this addition is the result of the first ever health check on a host, in which case // the configured :ref:`healthy threshold ` // is bypassed and the host is immediately added. bool first_check = 1; } message HealthCheckFailure { option (udpa.annotations.versioning).previous_message_type = "envoy.data.core.v2alpha.HealthCheckFailure"; // The type of failure that caused this event. HealthCheckFailureType failure_type = 1 [(validate.rules).enum = {defined_only: true}]; // Whether this event is the result of the first ever health check on a host. bool first_check = 2; } message DegradedHealthyHost { option (udpa.annotations.versioning).previous_message_type = "envoy.data.core.v2alpha.DegradedHealthyHost"; } message NoLongerDegradedHost { option (udpa.annotations.versioning).previous_message_type = "envoy.data.core.v2alpha.NoLongerDegradedHost"; } ================================================ FILE: api/envoy/data/dns/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/data/dns/v2alpha/dns_table.proto ================================================ syntax = "proto3"; package envoy.data.dns.v2alpha; import "envoy/type/matcher/string.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.dns.v2alpha"; option java_outer_classname = "DnsTableProto"; option java_multiple_files = true; option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: DNS Filter Table Data] // :ref:`DNS Filter config overview `. // This message contains the configuration for the DNS Filter if populated // from the control plane message DnsTable { // This message contains a list of IP addresses returned for a query for a known name message AddressList { // This field contains a well formed IP address that is returned // in the answer for a name query. The address field can be an // IPv4 or IPv6 address. Address family detection is done automatically // when Envoy parses the string. Since this field is repeated, // Envoy will return one randomly chosen entry from this list in the // DNS response. 
The random index will vary per query so that we prevent // clients pinning on a single address for a configured domain repeated string address = 1 [(validate.rules).repeated = { min_items: 1 items {string {min_len: 3}} }]; } // This message type is extensible and can contain a list of addresses // or dictate some other method for resolving the addresses for an // endpoint message DnsEndpoint { oneof endpoint_config { option (validate.required) = true; AddressList address_list = 1; } } message DnsVirtualDomain { // The domain name for which Envoy will respond to query requests string name = 1 [(validate.rules).string = {min_len: 2 well_known_regex: HTTP_HEADER_NAME}]; // The configuration containing the method to determine the address // of this endpoint DnsEndpoint endpoint = 2; // Sets the TTL in dns answers from Envoy returned to the client google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gt {}}]; } // Control how many times envoy makes an attempt to forward a query to // an external server uint32 external_retry_count = 1; // Fully qualified domain names for which Envoy will respond to queries repeated DnsVirtualDomain virtual_domains = 2 [(validate.rules).repeated = {min_items: 1}]; // This field serves to help Envoy determine whether it can authoritatively // answer a query for a name matching a suffix in this list. If the query // name does not match a suffix in this list, Envoy will forward // the query to an upstream DNS server repeated type.matcher.StringMatcher known_suffixes = 3; } ================================================ FILE: api/envoy/data/dns/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/data/dns/v2alpha:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/data/dns/v3/dns_table.proto ================================================ syntax = "proto3"; package envoy.data.dns.v3; import "envoy/type/matcher/v3/string.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.data.dns.v3"; option java_outer_classname = "DnsTableProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: DNS Filter Table Data] // :ref:`DNS Filter config overview `. // This message contains the configuration for the DNS Filter if populated // from the control plane message DnsTable { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable"; // This message contains a list of IP addresses returned for a query for a known name message AddressList { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.AddressList"; // This field contains a well formed IP address that is returned in the answer for a // name query. The address field can be an IPv4 or IPv6 address. Address family // detection is done automatically when Envoy parses the string. 
Since this field is // repeated, Envoy will return as many entries from this list in the DNS response while // keeping the response under 512 bytes repeated string address = 1 [(validate.rules).repeated = { min_items: 1 items {string {min_len: 3}} }]; } // Specify the service protocol using a numeric or string value message DnsServiceProtocol { oneof protocol_config { option (validate.required) = true; // Specify the protocol number for the service. Envoy will try to resolve the number to // the protocol name. For example, 6 will resolve to "tcp". Refer to: // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml // for protocol names and numbers uint32 number = 1 [(validate.rules).uint32 = {lt: 255}]; // Specify the protocol name for the service. string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; } } // Specify the target for a given DNS service // [#next-free-field: 6] message DnsServiceTarget { // Specify the name of the endpoint for the Service. The name is a hostname or a cluster oneof endpoint_type { option (validate.required) = true; // Use a resolvable hostname as the endpoint for a service. string host_name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // Use a cluster name as the endpoint for a service. string cluster_name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; } // The priority of the service record target uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}]; // The weight of the service record target uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}]; // The port to which the service is bound. This value is optional if the target is a // cluster. 
Setting port to zero in this case makes the filter use the port value // from the cluster host uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}]; } // This message defines a service selection record returned for a service query in a domain message DnsService { // The name of the service without the protocol or domain name string service_name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The service protocol. This can be specified as a string or the numeric value of the protocol DnsServiceProtocol protocol = 2; // The service entry time to live. This is independent from the DNS Answer record TTL google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}]; // The list of targets hosting the service repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}]; } // Define a list of service records for a given service message DnsServiceList { repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}]; } message DnsEndpoint { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsEndpoint"; oneof endpoint_config { option (validate.required) = true; // Define a list of addresses to return for the specified endpoint AddressList address_list = 1; // Define a cluster whose addresses are returned for the specified endpoint string cluster_name = 2; // Define a DNS Service List for the specified endpoint DnsServiceList service_list = 3; } } message DnsVirtualDomain { option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v2alpha.DnsTable.DnsVirtualDomain"; // A domain name for which Envoy will respond to query requests string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}]; // The configuration containing the method to determine the address of this endpoint DnsEndpoint endpoint = 2; // Sets the TTL in DNS answers from Envoy returned to the client. 
// The default TTL is 300s
  google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}];
}

// Control how many times Envoy makes an attempt to forward a query to an external DNS server
uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}];

// Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this
// list empty, Envoy will forward all queries to external resolvers
repeated DnsVirtualDomain virtual_domains = 2;

// This field serves to help Envoy determine whether it can authoritatively answer a query
// for a name matching a suffix in this list. If the query name does not match a suffix in
// this list, Envoy will forward the query to an upstream DNS server
repeated type.matcher.v3.StringMatcher known_suffixes = 3;
}

================================================
FILE: api/envoy/data/dns/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/data/dns/v3:pkg",
        "//envoy/type/matcher/v4alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/data/dns/v4alpha/dns_table.proto
================================================
syntax = "proto3";

package envoy.data.dns.v4alpha;

import "envoy/type/matcher/v4alpha/string.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.data.dns.v4alpha";
option java_outer_classname = "DnsTableProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: DNS Filter Table Data]
// :ref:`DNS Filter config overview `.

// This message contains the configuration for the DNS Filter if populated
// from the control plane
message DnsTable {
  option (udpa.annotations.versioning).previous_message_type = "envoy.data.dns.v3.DnsTable";

  // This message contains a list of IP addresses returned for a query for a known name
  message AddressList {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.dns.v3.DnsTable.AddressList";

    // This field contains a well formed IP address that is returned in the answer for a
    // name query. The address field can be an IPv4 or IPv6 address. Address family
    // detection is done automatically when Envoy parses the string. Since this field is
    // repeated, Envoy will return as many entries from this list in the DNS response while
    // keeping the response under 512 bytes
    repeated string address = 1 [(validate.rules).repeated = {
      min_items: 1
      items {string {min_len: 3}}
    }];
  }

  // Specify the service protocol using a numeric or string value
  message DnsServiceProtocol {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.dns.v3.DnsTable.DnsServiceProtocol";

    oneof protocol_config {
      option (validate.required) = true;

      // Specify the protocol number for the service. Envoy will try to resolve the number to
      // the protocol name. For example, 6 will resolve to "tcp". Refer to:
      // https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
      // for protocol names and numbers
      uint32 number = 1 [(validate.rules).uint32 = {lt: 255}];

      // Specify the protocol name for the service.
      string name = 2 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];
    }
  }

  // Specify the target for a given DNS service
  // [#next-free-field: 6]
  message DnsServiceTarget {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.dns.v3.DnsTable.DnsServiceTarget";

    // Specify the name of the endpoint for the Service. The name is a hostname or a cluster
    oneof endpoint_type {
      option (validate.required) = true;

      // Use a resolvable hostname as the endpoint for a service.
      string host_name = 1
          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];

      // Use a cluster name as the endpoint for a service.
      string cluster_name = 2
          [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];
    }

    // The priority of the service record target
    uint32 priority = 3 [(validate.rules).uint32 = {lt: 65536}];

    // The weight of the service record target
    uint32 weight = 4 [(validate.rules).uint32 = {lt: 65536}];

    // The port to which the service is bound. This value is optional if the target is a
    // cluster. Setting port to zero in this case makes the filter use the port value
    // from the cluster host
    uint32 port = 5 [(validate.rules).uint32 = {lt: 65536}];
  }

  // This message defines a service selection record returned for a service query in a domain
  message DnsService {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.dns.v3.DnsTable.DnsService";

    // The name of the service without the protocol or domain name
    string service_name = 1
        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];

    // The service protocol. This can be specified as a string or the numeric value of the protocol
    DnsServiceProtocol protocol = 2;

    // The service entry time to live. This is independent from the DNS Answer record TTL
    google.protobuf.Duration ttl = 3 [(validate.rules).duration = {gte {seconds: 1}}];

    // The list of targets hosting the service
    repeated DnsServiceTarget targets = 4 [(validate.rules).repeated = {min_items: 1}];
  }

  // Define a list of service records for a given service
  message DnsServiceList {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.dns.v3.DnsTable.DnsServiceList";

    repeated DnsService services = 1 [(validate.rules).repeated = {min_items: 1}];
  }

  message DnsEndpoint {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.dns.v3.DnsTable.DnsEndpoint";

    oneof endpoint_config {
      option (validate.required) = true;

      // Define a list of addresses to return for the specified endpoint
      AddressList address_list = 1;

      // Define a cluster whose addresses are returned for the specified endpoint
      string cluster_name = 2;

      // Define a DNS Service List for the specified endpoint
      DnsServiceList service_list = 3;
    }
  }

  message DnsVirtualDomain {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.dns.v3.DnsTable.DnsVirtualDomain";

    // A domain name for which Envoy will respond to query requests
    string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME}];

    // The configuration containing the method to determine the address of this endpoint
    DnsEndpoint endpoint = 2;

    // Sets the TTL in DNS answers from Envoy returned to the client. The default TTL is 300s
    google.protobuf.Duration answer_ttl = 3 [(validate.rules).duration = {gte {seconds: 30}}];
  }

  // Control how many times Envoy makes an attempt to forward a query to an external DNS server
  uint32 external_retry_count = 1 [(validate.rules).uint32 = {lte: 3}];

  // Fully qualified domain names for which Envoy will respond to DNS queries. By leaving this
  // list empty, Envoy will forward all queries to external resolvers
  repeated DnsVirtualDomain virtual_domains = 2;

  // This field serves to help Envoy determine whether it can authoritatively answer a query
  // for a name matching a suffix in this list. If the query name does not match a suffix in
  // this list, Envoy will forward the query to an upstream DNS server
  repeated type.matcher.v4alpha.StringMatcher known_suffixes = 3;
}

================================================
FILE: api/envoy/data/tap/v2alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2/core:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/data/tap/v2alpha/common.proto
================================================
syntax = "proto3";

package envoy.data.tap.v2alpha;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
option java_outer_classname = "CommonProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Tap common data]

// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received
// and transmitted data, etc.
message Body {
  oneof body_type {
    // Body data as bytes. By default, tap body data will be present in this field, as the proto
    // `bytes` type can contain any valid byte.
    bytes as_bytes = 1;

    // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING
    // ` sink
    // format type is selected. See the documentation for that option for why this is useful.
string as_string = 2;
  }

  // Specifies whether body data has been truncated to fit within the specified
  // :ref:`max_buffered_rx_bytes
  // ` and
  // :ref:`max_buffered_tx_bytes
  // ` settings.
  bool truncated = 3;
}

================================================
FILE: api/envoy/data/tap/v2alpha/http.proto
================================================
syntax = "proto3";

package envoy.data.tap.v2alpha;

import "envoy/api/v2/core/base.proto";
import "envoy/data/tap/v2alpha/common.proto";

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
option java_outer_classname = "HttpProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: HTTP tap data]

// A fully buffered HTTP trace message.
message HttpBufferedTrace {
  // HTTP message wrapper.
  message Message {
    // Message headers.
    repeated api.v2.core.HeaderValue headers = 1;

    // Message body.
    Body body = 2;

    // Message trailers.
    repeated api.v2.core.HeaderValue trailers = 3;
  }

  // Request message.
  Message request = 1;

  // Response message.
  Message response = 2;
}

// A streamed HTTP trace segment. Multiple segments make up a full trace.
// [#next-free-field: 8]
message HttpStreamedTraceSegment {
  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
  // for long term stable uniqueness.
  uint64 trace_id = 1;

  oneof message_piece {
    // Request headers.
    api.v2.core.HeaderMap request_headers = 2;

    // Request body chunk.
    Body request_body_chunk = 3;

    // Request trailers.
    api.v2.core.HeaderMap request_trailers = 4;

    // Response headers.
    api.v2.core.HeaderMap response_headers = 5;

    // Response body chunk.
    Body response_body_chunk = 6;

    // Response trailers.
    api.v2.core.HeaderMap response_trailers = 7;
  }
}

================================================
FILE: api/envoy/data/tap/v2alpha/transport.proto
================================================
syntax = "proto3";

package envoy.data.tap.v2alpha;

import "envoy/api/v2/core/address.proto";
import "envoy/data/tap/v2alpha/common.proto";

import "google/protobuf/timestamp.proto";

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
option java_outer_classname = "TransportProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Transport tap data]
// Trace format for the tap transport socket extension. This dumps plain text read/write
// sequences on a socket.

// Connection properties.
message Connection {
  // Local address.
  api.v2.core.Address local_address = 2;

  // Remote address.
  api.v2.core.Address remote_address = 3;
}

// Event in a socket trace.
message SocketEvent {
  // Data read by Envoy from the transport socket.
  message Read {
    // TODO(htuch): Half-close for reads.

    // Binary data read.
    Body data = 1;
  }

  // Data written by Envoy to the transport socket.
  message Write {
    // Binary data written.
    Body data = 1;

    // Stream was half closed after this write.
    bool end_stream = 2;
  }

  // The connection was closed.
  message Closed {
    // TODO(mattklein123): Close event type.
  }

  // Timestamp for event.
  google.protobuf.Timestamp timestamp = 1;

  // Read or write with content as bytes string.
  oneof event_selector {
    Read read = 2;

    Write write = 3;

    Closed closed = 4;
  }
}

// Sequence of read/write events that constitute a buffered trace on a socket.
// [#next-free-field: 6]
message SocketBufferedTrace {
  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.
  uint64 trace_id = 1;

  // Connection properties.
  Connection connection = 2;

  // Sequence of observed events.
  repeated SocketEvent events = 3;

  // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes
  // ` setting.
  bool read_truncated = 4;

  // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes
  // ` setting.
  bool write_truncated = 5;
}

// A streamed socket trace segment. Multiple segments make up a full trace.
message SocketStreamedTraceSegment {
  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.
  uint64 trace_id = 1;

  oneof message_piece {
    // Connection properties.
    Connection connection = 2;

    // Socket event.
    SocketEvent event = 3;
  }
}

================================================
FILE: api/envoy/data/tap/v2alpha/wrapper.proto
================================================
syntax = "proto3";

package envoy.data.tap.v2alpha;

import "envoy/data/tap/v2alpha/http.proto";
import "envoy/data/tap/v2alpha/transport.proto";

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.data.tap.v2alpha";
option java_outer_classname = "WrapperProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Tap data wrappers]

// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for
// sending traces over gRPC APIs or more easily persisting binary messages to files.
message TraceWrapper {
  oneof trace {
    option (validate.required) = true;

    // An HTTP buffered tap trace.
    HttpBufferedTrace http_buffered_trace = 1;

    // An HTTP streamed tap trace segment.
    HttpStreamedTraceSegment http_streamed_trace_segment = 2;

    // A socket buffered tap trace.
    SocketBufferedTrace socket_buffered_trace = 3;

    // A socket streamed tap trace segment.
    SocketStreamedTraceSegment socket_streamed_trace_segment = 4;
  }
}

================================================
FILE: api/envoy/data/tap/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/data/tap/v2alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/data/tap/v3/common.proto
================================================
syntax = "proto3";

package envoy.data.tap.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.data.tap.v3";
option java_outer_classname = "CommonProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Tap common data]

// Wrapper for tapped body data. This includes HTTP request/response body, transport socket received
// and transmitted data, etc.
message Body {
  option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Body";

  oneof body_type {
    // Body data as bytes. By default, tap body data will be present in this field, as the proto
    // `bytes` type can contain any valid byte.
    bytes as_bytes = 1;

    // Body data as string. This field is only used when the :ref:`JSON_BODY_AS_STRING
    // ` sink
    // format type is selected. See the documentation for that option for why this is useful.
    string as_string = 2;
  }

  // Specifies whether body data has been truncated to fit within the specified
  // :ref:`max_buffered_rx_bytes
  // ` and
  // :ref:`max_buffered_tx_bytes
  // ` settings.
bool truncated = 3;
}

================================================
FILE: api/envoy/data/tap/v3/http.proto
================================================
syntax = "proto3";

package envoy.data.tap.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/data/tap/v3/common.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.data.tap.v3";
option java_outer_classname = "HttpProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: HTTP tap data]

// A fully buffered HTTP trace message.
message HttpBufferedTrace {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.data.tap.v2alpha.HttpBufferedTrace";

  // HTTP message wrapper.
  message Message {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.tap.v2alpha.HttpBufferedTrace.Message";

    // Message headers.
    repeated config.core.v3.HeaderValue headers = 1;

    // Message body.
    Body body = 2;

    // Message trailers.
    repeated config.core.v3.HeaderValue trailers = 3;
  }

  // Request message.
  Message request = 1;

  // Response message.
  Message response = 2;
}

// A streamed HTTP trace segment. Multiple segments make up a full trace.
// [#next-free-field: 8]
message HttpStreamedTraceSegment {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.data.tap.v2alpha.HttpStreamedTraceSegment";

  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
  // for long term stable uniqueness.
  uint64 trace_id = 1;

  oneof message_piece {
    // Request headers.
    config.core.v3.HeaderMap request_headers = 2;

    // Request body chunk.
    Body request_body_chunk = 3;

    // Request trailers.
    config.core.v3.HeaderMap request_trailers = 4;

    // Response headers.
    config.core.v3.HeaderMap response_headers = 5;

    // Response body chunk.
    Body response_body_chunk = 6;

    // Response trailers.
    config.core.v3.HeaderMap response_trailers = 7;
  }
}

================================================
FILE: api/envoy/data/tap/v3/transport.proto
================================================
syntax = "proto3";

package envoy.data.tap.v3;

import "envoy/config/core/v3/address.proto";
import "envoy/data/tap/v3/common.proto";

import "google/protobuf/timestamp.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.data.tap.v3";
option java_outer_classname = "TransportProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Transport tap data]
// Trace format for the tap transport socket extension. This dumps plain text read/write
// sequences on a socket.

// Connection properties.
message Connection {
  option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.Connection";

  // Local address.
  config.core.v3.Address local_address = 2;

  // Remote address.
  config.core.v3.Address remote_address = 3;
}

// Event in a socket trace.
message SocketEvent {
  option (udpa.annotations.versioning).previous_message_type = "envoy.data.tap.v2alpha.SocketEvent";

  // Data read by Envoy from the transport socket.
  message Read {
    // TODO(htuch): Half-close for reads.

    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.tap.v2alpha.SocketEvent.Read";

    // Binary data read.
    Body data = 1;
  }

  // Data written by Envoy to the transport socket.
  message Write {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.tap.v2alpha.SocketEvent.Write";

    // Binary data written.
    Body data = 1;

    // Stream was half closed after this write.
    bool end_stream = 2;
  }

  // The connection was closed.
  message Closed {
    // TODO(mattklein123): Close event type.

    option (udpa.annotations.versioning).previous_message_type =
        "envoy.data.tap.v2alpha.SocketEvent.Closed";
  }

  // Timestamp for event.
  google.protobuf.Timestamp timestamp = 1;

  // Read or write with content as bytes string.
  oneof event_selector {
    Read read = 2;

    Write write = 3;

    Closed closed = 4;
  }
}

// Sequence of read/write events that constitute a buffered trace on a socket.
// [#next-free-field: 6]
message SocketBufferedTrace {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.data.tap.v2alpha.SocketBufferedTrace";

  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.
  uint64 trace_id = 1;

  // Connection properties.
  Connection connection = 2;

  // Sequence of observed events.
  repeated SocketEvent events = 3;

  // Set to true if read events were truncated due to the :ref:`max_buffered_rx_bytes
  // ` setting.
  bool read_truncated = 4;

  // Set to true if write events were truncated due to the :ref:`max_buffered_tx_bytes
  // ` setting.
  bool write_truncated = 5;
}

// A streamed socket trace segment. Multiple segments make up a full trace.
message SocketStreamedTraceSegment {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.data.tap.v2alpha.SocketStreamedTraceSegment";

  // Trace ID unique to the originating Envoy only. Trace IDs can repeat and should not be used
  // for long term stable uniqueness. Matches connection IDs used in Envoy logs.
  uint64 trace_id = 1;

  oneof message_piece {
    // Connection properties.
    Connection connection = 2;

    // Socket event.
    SocketEvent event = 3;
  }
}

================================================
FILE: api/envoy/data/tap/v3/wrapper.proto
================================================
syntax = "proto3";

package envoy.data.tap.v3;

import "envoy/data/tap/v3/http.proto";
import "envoy/data/tap/v3/transport.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.data.tap.v3";
option java_outer_classname = "WrapperProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Tap data wrappers]

// Wrapper for all fully buffered and streamed tap traces that Envoy emits. This is required for
// sending traces over gRPC APIs or more easily persisting binary messages to files.
message TraceWrapper {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.data.tap.v2alpha.TraceWrapper";

  oneof trace {
    option (validate.required) = true;

    // An HTTP buffered tap trace.
    HttpBufferedTrace http_buffered_trace = 1;

    // An HTTP streamed tap trace segment.
    HttpStreamedTraceSegment http_streamed_trace_segment = 2;

    // A socket buffered tap trace.
    SocketBufferedTrace socket_buffered_trace = 3;

    // A socket streamed tap trace segment.
    SocketStreamedTraceSegment socket_streamed_trace_segment = 4;
  }
}

================================================
FILE: api/envoy/extensions/access_loggers/file/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/accesslog/v2:pkg",
        "//envoy/config/core/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/access_loggers/file/v3/file.proto
================================================
syntax = "proto3";

package envoy.extensions.access_loggers.file.v3;

import "envoy/config/core/v3/substitution_format_string.proto";

import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v3";
option java_outer_classname = "FileProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: File access log]
// [#extension: envoy.access_loggers.file]

// Custom configuration for an :ref:`AccessLog `
// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file*
// AccessLog.
// [#next-free-field: 6]
message FileAccessLog {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.accesslog.v2.FileAccessLog";

  // A path to a local file to which to write the access log entries.
  string path = 1 [(validate.rules).string = {min_len: 1}];

  oneof access_log_format {
    // Access log :ref:`format string`.
    // Envoy supports :ref:`custom access log formats ` as well as a
    // :ref:`default format `.
    // This field is deprecated.
    // Please use :ref:`log_format `.
    string format = 2 [deprecated = true];

    // Access log :ref:`format dictionary`. All values
    // are rendered as strings.
    // This field is deprecated.
    // Please use :ref:`log_format `.
    google.protobuf.Struct json_format = 3 [deprecated = true];

    // Access log :ref:`format dictionary`. Values are
    // rendered as strings, numbers, or boolean values as appropriate. Nested JSON objects may
    // be produced by some command operators (e.g.FILTER_STATE or DYNAMIC_METADATA). See the
    // documentation for a specific command operator for details.
    // This field is deprecated.
    // Please use :ref:`log_format `.
    google.protobuf.Struct typed_json_format = 4 [deprecated = true];

    // Configuration to form access log data and format.
    // If not specified, use :ref:`default format `.
    config.core.v3.SubstitutionFormatString log_format = 5
        [(validate.rules).message = {required: true}];
  }
}

================================================
FILE: api/envoy/extensions/access_loggers/file/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v4alpha:pkg",
        "//envoy/extensions/access_loggers/file/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/access_loggers/file/v4alpha/file.proto
================================================
syntax = "proto3";

package envoy.extensions.access_loggers.file.v4alpha;

import "envoy/config/core/v4alpha/substitution_format_string.proto";

import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.access_loggers.file.v4alpha";
option java_outer_classname = "FileProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: File access log]
// [#extension: envoy.access_loggers.file]

// Custom configuration for an :ref:`AccessLog `
// that writes log entries directly to a file. Configures the built-in *envoy.access_loggers.file*
// AccessLog.
// [#next-free-field: 6]
message FileAccessLog {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.access_loggers.file.v3.FileAccessLog";

  reserved 2, 3, 4;

  reserved "format", "json_format", "typed_json_format";

  // A path to a local file to which to write the access log entries.
  string path = 1 [(validate.rules).string = {min_len: 1}];

  oneof access_log_format {
    // Configuration to form access log data and format.
    // If not specified, use :ref:`default format `.
    config.core.v4alpha.SubstitutionFormatString log_format = 5
        [(validate.rules).message = {required: true}];
  }
}

================================================
FILE: api/envoy/extensions/access_loggers/grpc/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/accesslog/v2:pkg",
        "//envoy/config/core/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/access_loggers/grpc/v3/als.proto
================================================
syntax = "proto3";

package envoy.extensions.access_loggers.grpc.v3;

import "envoy/config/core/v3/config_source.proto";
import "envoy/config/core/v3/grpc_service.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.access_loggers.grpc.v3";
option java_outer_classname = "AlsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: gRPC Access Log Service (ALS)]

// Configuration for the built-in *envoy.access_loggers.http_grpc*
// :ref:`AccessLog `. This configuration will
// populate :ref:`StreamAccessLogsMessage.http_logs
// `.
// [#extension: envoy.access_loggers.http_grpc]
message HttpGrpcAccessLogConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.accesslog.v2.HttpGrpcAccessLogConfig";

  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];

  // Additional request headers to log in :ref:`HTTPRequestProperties.request_headers
  // `.
  repeated string additional_request_headers_to_log = 2;

  // Additional response headers to log in :ref:`HTTPResponseProperties.response_headers
  // `.
  repeated string additional_response_headers_to_log = 3;

  // Additional response trailers to log in :ref:`HTTPResponseProperties.response_trailers
  // `.
  repeated string additional_response_trailers_to_log = 4;
}

// Configuration for the built-in *envoy.access_loggers.tcp_grpc* type. This configuration will
// populate *StreamAccessLogsMessage.tcp_logs*.
// [#extension: envoy.access_loggers.tcp_grpc]
message TcpGrpcAccessLogConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.accesslog.v2.TcpGrpcAccessLogConfig";

  CommonGrpcAccessLogConfig common_config = 1 [(validate.rules).message = {required: true}];
}

// Common configuration for gRPC access logs.
// [#next-free-field: 7]
message CommonGrpcAccessLogConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.accesslog.v2.CommonGrpcAccessLogConfig";

  // The friendly name of the access log to be returned in :ref:`StreamAccessLogsMessage.Identifier
  // `. This allows the
  // access log server to differentiate between different access logs coming from the same Envoy.
  string log_name = 1 [(validate.rules).string = {min_len: 1}];

  // The gRPC service for the access log service.
  config.core.v3.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}];

  // API version for access logs service transport protocol. This describes the access logs service
  // gRPC endpoint and version of messages used on the wire.
  config.core.v3.ApiVersion transport_api_version = 6
      [(validate.rules).enum = {defined_only: true}];

  // Interval for flushing access logs to the gRPC stream. Logger will flush requests every time
  // this interval is elapsed, or when batch size limit is hit, whichever comes first. Defaults to
  // 1 second.
  google.protobuf.Duration buffer_flush_interval = 3 [(validate.rules).duration = {gt {}}];

  // Soft size limit in bytes for access log entries buffer. Logger will buffer requests until
  // this limit it hit, or every time flush interval is elapsed, whichever comes first. Setting it
  // to zero effectively disables the batching. Defaults to 16384.
  google.protobuf.UInt32Value buffer_size_bytes = 4;

  // Additional filter state objects to log in :ref:`filter_state_objects
  // `.
  // Logger will call `FilterState::Object::serializeAsProto` to serialize the filter state object.
  repeated string filter_state_objects_to_log = 5;
}

================================================
FILE: api/envoy/extensions/access_loggers/wasm/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/extensions/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/access_loggers/wasm/v3/wasm.proto ================================================ syntax = "proto3"; package envoy.extensions.access_loggers.wasm.v3; import "envoy/extensions/wasm/v3/wasm.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.access_loggers.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm access log] // [#extension: envoy.access_loggers.wasm] // Custom configuration for an :ref:`AccessLog ` // that calls into a WASM VM. Configures the built-in *envoy.access_loggers.wasm* // AccessLog. message WasmAccessLog { envoy.extensions.wasm.v3.PluginConfig config = 1; } ================================================ FILE: api/envoy/extensions/clusters/aggregate/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/cluster/aggregate/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/clusters/aggregate/v3/cluster.proto ================================================ syntax = "proto3"; package envoy.extensions.clusters.aggregate.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.clusters.aggregate.v3"; option java_outer_classname = "ClusterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Aggregate cluster configuration] // Configuration for the aggregate cluster. See the :ref:`architecture overview // ` for more information. // [#extension: envoy.clusters.aggregate] message ClusterConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.cluster.aggregate.v2alpha.ClusterConfig"; // Load balancing clusters in aggregate cluster. Clusters are prioritized based on the order they // appear in this list. repeated string clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/extensions/clusters/dynamic_forward_proxy/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg",
        "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================ FILE: api/envoy/extensions/clusters/dynamic_forward_proxy/v3/cluster.proto ================================================

syntax = "proto3";

package envoy.extensions.clusters.dynamic_forward_proxy.v3;

import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.clusters.dynamic_forward_proxy.v3";
option java_outer_classname = "ClusterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Dynamic forward proxy cluster configuration]

// NOTE(review): :ref: targets in this file were stripped during extraction;
// restored from upstream Envoy — confirm the anchors.
// Configuration for the dynamic forward proxy cluster. See the :ref:`architecture overview
// <arch_overview_http_dynamic_forward_proxy>` for more information.
// [#extension: envoy.clusters.dynamic_forward_proxy]
message ClusterConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.dynamic_forward_proxy.v2alpha.ClusterConfig";

  // The DNS cache configuration that the cluster will attach to. Note this configuration must
  // match that of associated :ref:`dynamic forward proxy HTTP filter configuration
  // <envoy_api_field_extensions.filters.http.dynamic_forward_proxy.v3.FilterConfig.dns_cache_config>`.
  common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1
      [(validate.rules).message = {required: true}];

  // If true allow the cluster configuration to disable the auto_sni and auto_san_validation options
  // in the :ref:`cluster's upstream_http_protocol_options
  // <envoy_api_field_config.cluster.v3.Cluster.upstream_http_protocol_options>`
  bool allow_insecure_cluster_options = 2;
}

================================================ FILE: api/envoy/extensions/clusters/redis/v3/BUILD ================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/cluster/redis:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================ FILE: api/envoy/extensions/clusters/redis/v3/redis_cluster.proto ================================================

syntax = "proto3";

package envoy.extensions.clusters.redis.v3;

import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.clusters.redis.v3";
option java_outer_classname = "RedisClusterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Redis Cluster Configuration]

// NOTE(review): link/:ref: targets below were stripped during extraction;
// restored from upstream Envoy — confirm.
// This cluster adds support for `Redis Cluster <https://redis.io/topics/cluster-spec>`_, as part
// of :ref:`Envoy's support for Redis Cluster <arch_overview_redis>`.
//
// Redis Cluster is an extension of Redis which supports sharding and high availability (where a
// shard that loses its primary fails over to a replica, and designates it as the new primary).
// However, as there is no unified frontend or proxy service in front of Redis Cluster, the client
// (in this case Envoy) must locally maintain the state of the Redis Cluster, specifically the
// topology. A random node in the cluster is queried for the topology using the `CLUSTER SLOTS
// command <https://redis.io/commands/cluster-slots>`_. This result is then stored locally, and
// updated at user-configured intervals.
//
// Additionally, if
// :ref:`enable_redirection
// <envoy_api_field_config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings.enable_redirection>`
// is true, then moved and ask redirection errors from upstream servers will trigger a topology
// refresh when they exceed a user-configured error threshold.
//
// Example:
//
// .. code-block:: yaml
//
//     name: name
//     connect_timeout: 0.25s
//     dns_lookup_family: V4_ONLY
//     hosts:
//     - socket_address:
//         address: foo.bar.com
//         port_value: 22120
//     cluster_type:
//       name: envoy.clusters.redis
//       typed_config:
//         "@type": type.googleapis.com/google.protobuf.Struct
//         value:
//           cluster_refresh_rate: 30s
//           cluster_refresh_timeout: 0.5s
//           redirect_refresh_interval: 10s
//           redirect_refresh_threshold: 10
// [#extension: envoy.clusters.redis]

// [#next-free-field: 7]
message RedisClusterConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.cluster.redis.RedisClusterConfig";

  // Interval between successive topology refresh requests. If not set, this defaults to 5s.
  google.protobuf.Duration cluster_refresh_rate = 1 [(validate.rules).duration = {gt {}}];

  // Timeout for topology refresh request. If not set, this defaults to 3s.
  google.protobuf.Duration cluster_refresh_timeout = 2 [(validate.rules).duration = {gt {}}];

  // The minimum interval that must pass after triggering a topology refresh request before a new
  // request can possibly be triggered again. Any errors received during one of these
  // time intervals are ignored. If not set, this defaults to 5s.
  google.protobuf.Duration redirect_refresh_interval = 3;

  // The number of redirection errors that must be received before
  // triggering a topology refresh request. If not set, this defaults to 5.
  // If this is set to 0, topology refresh after redirect is disabled.
  google.protobuf.UInt32Value redirect_refresh_threshold = 4;

  // The number of failures that must be received before triggering a topology refresh request.
  // If not set, this defaults to 0, which disables the topology refresh due to failure.
  uint32 failure_refresh_threshold = 5;

  // The number of hosts became degraded or unhealthy before triggering a topology refresh request.
  // If not set, this defaults to 0, which disables the topology refresh due to degraded or
  // unhealthy host.
  uint32 host_degraded_refresh_threshold = 6;
}

================================================ FILE: api/envoy/extensions/common/dynamic_forward_proxy/v3/BUILD ================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/cluster/v3:pkg",
        "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================ FILE: api/envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto ================================================

syntax = "proto3";

package envoy.extensions.common.dynamic_forward_proxy.v3;

import "envoy/config/cluster/v3/cluster.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.common.dynamic_forward_proxy.v3";
option java_outer_classname = "DnsCacheProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Dynamic forward proxy common configuration]

// Configuration of circuit breakers for resolver.
message DnsCacheCircuitBreakers {
  // The maximum number of pending requests that Envoy will allow to the
  // resolver. If not specified, the default is 1024.
  google.protobuf.UInt32Value max_pending_requests = 1;
}

// NOTE(review): the :ref: target below was stripped during extraction;
// restored from upstream Envoy — confirm the anchor.
// Configuration for the dynamic forward proxy DNS cache. See the :ref:`architecture overview
// <arch_overview_http_dynamic_forward_proxy>` for more information.
// [#next-free-field: 9]
message DnsCacheConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.common.dynamic_forward_proxy.v2alpha.DnsCacheConfig";

  // The name of the cache. Multiple named caches allow independent dynamic forward proxy
  // configurations to operate within a single Envoy process using different configurations. All
  // configurations with the same name *must* otherwise have the same settings when referenced
  // from different configuration components. Configuration will fail to load if this is not
  // the case.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // The DNS lookup family to use during resolution.
  //
  // [#comment:TODO(mattklein123): Figure out how to support IPv4/IPv6 "happy eyeballs" mode. The
  // way this might work is a new lookup family which returns both IPv4 and IPv6 addresses, and
  // then configures a host to have a primary and fall back address. With this, we could very
  // likely build a "happy eyeballs" connection pool which would race the primary / fall back
  // address and return the one that wins. This same method could potentially also be used for
  // QUIC to TCP fall back.]
  config.cluster.v3.Cluster.DnsLookupFamily dns_lookup_family = 2
      [(validate.rules).enum = {defined_only: true}];

  // The DNS refresh rate for currently cached DNS hosts. If not specified defaults to 60s.
  //
  // .. note:
  //
  //  The returned DNS TTL is not currently used to alter the refresh rate. This feature will be
  //  added in a future change.
  //
  // .. note:
  //
  //  The refresh rate is rounded to the closest millisecond, and must be at least 1ms.
  google.protobuf.Duration dns_refresh_rate = 3
      [(validate.rules).duration = {gte {nanos: 1000000}}];

  // The TTL for hosts that are unused. Hosts that have not been used in the configured time
  // interval will be purged. If not specified defaults to 5m.
  //
  // .. note:
  //
  //   The TTL is only checked at the time of DNS refresh, as specified by *dns_refresh_rate*. This
  //   means that if the configured TTL is shorter than the refresh rate the host may not be removed
  //   immediately.
  //
  //  .. note:
  //
  //   The TTL has no relation to DNS TTL and is only used to control Envoy's resource usage.
  google.protobuf.Duration host_ttl = 4 [(validate.rules).duration = {gt {}}];

  // The maximum number of hosts that the cache will hold. If not specified defaults to 1024.
  //
  // .. note:
  //
  //   The implementation is approximate and enforced independently on each worker thread, thus
  //   it is possible for the maximum hosts in the cache to go slightly above the configured
  //   value depending on timing. This is similar to how other circuit breakers work.
  google.protobuf.UInt32Value max_hosts = 5 [(validate.rules).uint32 = {gt: 0}];

  // If the DNS failure refresh rate is specified,
  // this is used as the cache's DNS refresh rate when DNS requests are failing. If this setting is
  // not specified, the failure refresh rate defaults to the dns_refresh_rate.
  config.cluster.v3.Cluster.RefreshRate dns_failure_refresh_rate = 6;

  // The config of circuit breakers for resolver. It provides a configurable threshold.
  // If `envoy.reloadable_features.enable_dns_cache_circuit_breakers` is enabled,
  // envoy will use dns cache circuit breakers with default settings even if this value is not set.
  DnsCacheCircuitBreakers dns_cache_circuit_breaker = 7;

  // [#next-major-version: Reconcile DNS options in a single message.]
  // Always use TCP queries instead of UDP queries for DNS lookups.
  // Setting this value causes failure if the
  // ``envoy.restart_features.use_apple_api_for_dns_lookups`` runtime value is true during
  // server startup. Apple's API only uses UDP for DNS resolution.
  bool use_tcp_for_dns_lookups = 8;
}

================================================ FILE: api/envoy/extensions/common/ratelimit/v3/BUILD ================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/api/v2/ratelimit:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================ FILE: api/envoy/extensions/common/ratelimit/v3/ratelimit.proto ================================================

syntax = "proto3";

package envoy.extensions.common.ratelimit.v3;

import "envoy/type/v3/ratelimit_unit.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.common.ratelimit.v3";
option java_outer_classname = "RatelimitProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Common rate limit components]

// A RateLimitDescriptor is a list of hierarchical entries that are used by the service to
// determine the final rate limit key and overall allowed limit. Here are some examples of how
// they might be used for the domain "envoy".
//
// .. code-block:: cpp
//
//   ["authenticated": "false"], ["remote_address": "10.0.0.1"]
//
// What it does: Limits all unauthenticated traffic for the IP address 10.0.0.1. The
// configuration supplies a default limit for the *remote_address* key. If there is a desire to
// raise the limit for 10.0.0.1 or block it entirely it can be specified directly in the
// configuration.
//
// .. code-block:: cpp
//
//   ["authenticated": "false"], ["path": "/foo/bar"]
//
// What it does: Limits all unauthenticated traffic globally for a specific path (or prefix if
// configured that way in the service).
//
// .. code-block:: cpp
//
//   ["authenticated": "false"], ["path": "/foo/bar"], ["remote_address": "10.0.0.1"]
//
// What it does: Limits unauthenticated traffic to a specific path for a specific IP address.
// Like (1) we can raise/block specific IP addresses if we want with an override configuration.
//
// .. code-block:: cpp
//
//   ["authenticated": "true"], ["client_id": "foo"]
//
// What it does: Limits all traffic for an authenticated client "foo"
//
// .. code-block:: cpp
//
//   ["authenticated": "true"], ["client_id": "foo"], ["path": "/foo/bar"]
//
// What it does: Limits traffic to a specific path for an authenticated client "foo"
//
// The idea behind the API is that (1)/(2)/(3) and (4)/(5) can be sent in 1 request if desired.
// This enables building complex application scenarios with a generic backend.
//
// Optionally the descriptor can contain a limit override under a "limit" key, that specifies
// the number of requests per unit to use instead of the number configured in the
// rate limiting service.
message RateLimitDescriptor {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.api.v2.ratelimit.RateLimitDescriptor";

  message Entry {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.api.v2.ratelimit.RateLimitDescriptor.Entry";

    // Descriptor key.
    string key = 1 [(validate.rules).string = {min_len: 1}];

    // Descriptor value.
    string value = 2 [(validate.rules).string = {min_len: 1}];
  }

  // NOTE(review): the :ref: target below was stripped during extraction;
  // restored from upstream Envoy — confirm the anchor.
  // Override rate limit to apply to this descriptor instead of the limit
  // configured in the rate limit service. See :ref:`rate limit override
  // <config_http_filters_rate_limit_rate_limit_override>` for more information.
  message RateLimitOverride {
    // The number of requests per unit of time.
    uint32 requests_per_unit = 1;

    // The unit of time.
    type.v3.RateLimitUnit unit = 2 [(validate.rules).enum = {defined_only: true}];
  }

  // Descriptor entries.
  repeated Entry entries = 1 [(validate.rules).repeated = {min_items: 1}];

  // Optional rate limit override to supply to the ratelimit service.
  RateLimitOverride limit = 2;
}

================================================ FILE: api/envoy/extensions/common/tap/v3/BUILD ================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/common/tap/v2alpha:pkg",
        "//envoy/config/core/v3:pkg",
        "//envoy/config/tap/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
        "@com_github_cncf_udpa//udpa/core/v1:pkg",
    ],
)

================================================ FILE: api/envoy/extensions/common/tap/v3/common.proto ================================================

syntax = "proto3";

package envoy.extensions.common.tap.v3;

import "envoy/config/core/v3/config_source.proto";
import "envoy/config/tap/v3/common.proto";

import "udpa/core/v1/resource_locator.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.common.tap.v3";
option java_outer_classname = "CommonProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Common tap extension configuration]

// Common configuration for all tap extensions.
message CommonExtensionConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.common.tap.v2alpha.CommonExtensionConfig";

  // [#not-implemented-hide:]
  message TapDSConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.common.tap.v2alpha.CommonExtensionConfig.TapDSConfig";

    // Configuration for the source of TapDS updates for this Cluster.
    config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];

    // Tap config to request from XDS server.
    string name = 2 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"];

    // Resource locator for TAP. This is mutually exclusive to *name*.
    // [#not-implemented-hide:]
    udpa.core.v1.ResourceLocator tap_resource_locator = 3
        [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"];
  }

  oneof config_type {
    option (validate.required) = true;

    // If specified, the tap filter will be configured via an admin handler.
    AdminConfig admin_config = 1;

    // If specified, the tap filter will be configured via a static configuration that cannot be
    // changed.
    config.tap.v3.TapConfig static_config = 2;

    // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter.
    TapDSConfig tapds_config = 3;
  }
}

// NOTE(review): the :ref: target below was stripped during extraction;
// restored from upstream Envoy — confirm the anchor.
// Configuration for the admin handler. See :ref:`here <operations_admin_interface_tap>` for
// more information.
message AdminConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.common.tap.v2alpha.AdminConfig";

  // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is
  // matched to the configured filter opaque ID to determine which filter to configure.
  string config_id = 1 [(validate.rules).string = {min_len: 1}];
}

================================================ FILE: api/envoy/extensions/common/tap/v4alpha/BUILD ================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v4alpha:pkg",
        "//envoy/config/tap/v4alpha:pkg",
        "//envoy/extensions/common/tap/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
        "@com_github_cncf_udpa//udpa/core/v1:pkg",
    ],
)

================================================ FILE: api/envoy/extensions/common/tap/v4alpha/common.proto ================================================

syntax = "proto3";

package envoy.extensions.common.tap.v4alpha;

import "envoy/config/core/v4alpha/config_source.proto";
import "envoy/config/tap/v4alpha/common.proto";

import "udpa/core/v1/resource_locator.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.common.tap.v4alpha";
option java_outer_classname = "CommonProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Common tap extension configuration]

// Common configuration for all tap extensions.
message CommonExtensionConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.common.tap.v3.CommonExtensionConfig";

  // [#not-implemented-hide:]
  message TapDSConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.common.tap.v3.CommonExtensionConfig.TapDSConfig";

    // Configuration for the source of TapDS updates for this Cluster.
    config.core.v4alpha.ConfigSource config_source = 1
        [(validate.rules).message = {required: true}];

    oneof name_specifier {
      // Tap config to request from XDS server.
      string name = 2;

      // Resource locator for TAP. This is mutually exclusive to *name*.
      // [#not-implemented-hide:]
      udpa.core.v1.ResourceLocator tap_resource_locator = 3;
    }
  }

  oneof config_type {
    option (validate.required) = true;

    // If specified, the tap filter will be configured via an admin handler.
    AdminConfig admin_config = 1;

    // If specified, the tap filter will be configured via a static configuration that cannot be
    // changed.
    config.tap.v4alpha.TapConfig static_config = 2;

    // [#not-implemented-hide:] Configuration to use for TapDS updates for the filter.
    TapDSConfig tapds_config = 3;
  }
}

// NOTE(review): the :ref: target below was stripped during extraction;
// restored from upstream Envoy — confirm the anchor.
// Configuration for the admin handler. See :ref:`here <operations_admin_interface_tap>` for
// more information.
message AdminConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.common.tap.v3.AdminConfig";

  // Opaque configuration ID. When requests are made to the admin handler, the passed opaque ID is
  // matched to the configured filter opaque ID to determine which filter to configure.
  string config_id = 1 [(validate.rules).string = {min_len: 1}];
}

================================================ FILE: api/envoy/extensions/compression/gzip/compressor/v3/BUILD ================================================

# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/extensions/compression/gzip/compressor/v3/gzip.proto ================================================ syntax = "proto3"; package envoy.extensions.compression.gzip.compressor.v3; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.compressor.v3"; option java_outer_classname = "GzipProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Gzip Compressor] // [#extension: envoy.compression.gzip.compressor] // [#next-free-field: 6] message Gzip { // All the values of this enumeration translate directly to zlib's compression strategies. // For more information about each strategy, please refer to zlib manual. enum CompressionStrategy { DEFAULT_STRATEGY = 0; FILTERED = 1; HUFFMAN_ONLY = 2; RLE = 3; FIXED = 4; } enum CompressionLevel { option allow_alias = true; DEFAULT_COMPRESSION = 0; BEST_SPEED = 1; COMPRESSION_LEVEL_1 = 1; COMPRESSION_LEVEL_2 = 2; COMPRESSION_LEVEL_3 = 3; COMPRESSION_LEVEL_4 = 4; COMPRESSION_LEVEL_5 = 5; COMPRESSION_LEVEL_6 = 6; COMPRESSION_LEVEL_7 = 7; COMPRESSION_LEVEL_8 = 8; COMPRESSION_LEVEL_9 = 9; BEST_COMPRESSION = 9; } // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values // use more memory, but are faster and produce better compression results. The default value is 5. google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; // A value used for selecting the zlib compression level. This setting will affect speed and // amount of compression applied to the content. 
"BEST_COMPRESSION" provides higher compression // at the cost of higher latency and is equal to "COMPRESSION_LEVEL_9". "BEST_SPEED" provides // lower compression with minimum impact on response time, the same as "COMPRESSION_LEVEL_1". // "DEFAULT_COMPRESSION" provides an optimal result between speed and compression. According // to zlib's manual this level gives the same result as "COMPRESSION_LEVEL_6". // This field will be set to "DEFAULT_COMPRESSION" if not specified. CompressionLevel compression_level = 2 [(validate.rules).enum = {defined_only: true}]; // A value used for selecting the zlib compression strategy which is directly related to the // characteristics of the content. Most of the time "DEFAULT_STRATEGY" will be the best choice, // which is also the default value for the parameter, though there are situations when // changing this parameter might produce better results. For example, run-length encoding (RLE) // is typically used when the content is known for having sequences which same data occurs many // consecutive times. For more information about each strategy, please refer to zlib manual. CompressionStrategy compression_strategy = 3 [(validate.rules).enum = {defined_only: true}]; // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. // Larger window results in better compression at the expense of memory usage. The default is 12 // which will produce a 4096 bytes window. For more details about this parameter, please refer to // zlib manual > deflateInit2. google.protobuf.UInt32Value window_bits = 4 [(validate.rules).uint32 = {lte: 15 gte: 9}]; // Value for Zlib's next output buffer. If not set, defaults to 4096. // See https://www.zlib.net/manual.html for more details. Also see // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. 
google.protobuf.UInt32Value chunk_size = 5 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; } ================================================ FILE: api/envoy/extensions/compression/gzip/decompressor/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/extensions/compression/gzip/decompressor/v3/gzip.proto ================================================ syntax = "proto3"; package envoy.extensions.compression.gzip.decompressor.v3; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.compression.gzip.decompressor.v3"; option java_outer_classname = "GzipProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Gzip Decompressor] // [#extension: envoy.compression.gzip.decompressor] message Gzip { // Value from 9 to 15 that represents the base two logarithmic of the decompressor's window size. // The decompression window size needs to be equal or larger than the compression window size. // The default is 12 to match the default in the // :ref:`gzip compressor `. // For more details about this parameter, please refer to `zlib manual `_ > inflateInit2. google.protobuf.UInt32Value window_bits = 1 [(validate.rules).uint32 = {lte: 15 gte: 9}]; // Value for zlib's decompressor output buffer. If not set, defaults to 4096. // See https://www.zlib.net/manual.html for more details. 
google.protobuf.UInt32Value chunk_size = 2 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; } ================================================ FILE: api/envoy/extensions/filters/common/fault/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/filter/fault/v2:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/common/fault/v3/fault.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.common.fault.v3; import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.common.fault.v3"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common fault injection types] // Delay specification is used to inject latency into the // HTTP/gRPC/Mongo/Redis operation or delay proxying of TCP connections. // [#next-free-field: 6] message FaultDelay { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.fault.v2.FaultDelay"; enum FaultDelayType { // Unused and deprecated. FIXED = 0; } // Fault delays are controlled via an HTTP header (if applicable). See the // :ref:`HTTP fault filter ` documentation for // more information. 
message HeaderDelay { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.fault.v2.FaultDelay.HeaderDelay"; } reserved 2, 1; reserved "type"; oneof fault_delay_secifier { option (validate.required) = true; // Add a fixed delay before forwarding the operation upstream. See // https://developers.google.com/protocol-buffers/docs/proto3#json for // the JSON/YAML Duration mapping. For HTTP/Mongo/Redis, the specified // delay will be injected before a new request/operation. For TCP // connections, the proxying of the connection upstream will be delayed // for the specified period. This is required if type is FIXED. google.protobuf.Duration fixed_delay = 3 [(validate.rules).duration = {gt {}}]; // Fault delays are controlled via an HTTP header (if applicable). HeaderDelay header_delay = 5; } // The percentage of operations/connections/requests on which the delay will be injected. type.v3.FractionalPercent percentage = 4; } // Describes a rate limit to be applied. message FaultRateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.fault.v2.FaultRateLimit"; // Describes a fixed/constant rate limit. message FixedLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.fault.v2.FaultRateLimit.FixedLimit"; // The limit supplied in KiB/s. uint64 limit_kbps = 1 [(validate.rules).uint64 = {gte: 1}]; } // Rate limits are controlled via an HTTP header (if applicable). See the // :ref:`HTTP fault filter ` documentation for // more information. message HeaderLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.fault.v2.FaultRateLimit.HeaderLimit"; } oneof limit_type { option (validate.required) = true; // A fixed rate limit. FixedLimit fixed_limit = 1; // Rate limits are controlled via an HTTP header (if applicable). HeaderLimit header_limit = 3; } // The percentage of operations/connections/requests on which the rate limit will be injected. 
type.v3.FractionalPercent percentage = 2; } ================================================ FILE: api/envoy/extensions/filters/http/adaptive_concurrency/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/adaptive_concurrency/v3/adaptive_concurrency.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.adaptive_concurrency.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.adaptive_concurrency.v3"; option java_outer_classname = "AdaptiveConcurrencyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Adaptive Concurrency] // Adaptive Concurrency Control :ref:`configuration overview // `. // [#extension: envoy.filters.http.adaptive_concurrency] // Configuration parameters for the gradient controller. message GradientControllerConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig"; // Parameters controlling the periodic recalculation of the concurrency limit from sampled request // latencies. 
message ConcurrencyLimitCalculationParams {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig."
        "ConcurrencyLimitCalculationParams";

    // The allowed upper-bound on the calculated concurrency limit. Defaults to 1000.
    google.protobuf.UInt32Value max_concurrency_limit = 2 [(validate.rules).uint32 = {gt: 0}];

    // The period of time samples are taken to recalculate the concurrency limit.
    google.protobuf.Duration concurrency_update_interval = 3 [(validate.rules).duration = {
      required: true
      gt {}
    }];
  }

  // Parameters controlling the periodic minRTT recalculation.
  // [#next-free-field: 6]
  message MinimumRTTCalculationParams {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.filter.http.adaptive_concurrency.v2alpha.GradientControllerConfig."
        "MinimumRTTCalculationParams";

    // The time interval between recalculating the minimum request round-trip time. Has to be
    // positive (the validation rule below enforces >= 1ms).
    google.protobuf.Duration interval = 1 [(validate.rules).duration = {
      required: true
      gte {nanos: 1000000}
    }];

    // The number of requests to aggregate/sample during the minRTT recalculation window before
    // updating. Defaults to 50.
    google.protobuf.UInt32Value request_count = 2 [(validate.rules).uint32 = {gt: 0}];

    // Randomized time delta that will be introduced to the start of the minRTT calculation window.
    // This is represented as a percentage of the interval duration. Defaults to 15%.
    //
    // Example: If the interval is 10s and the jitter is 15%, the next window will begin
    // somewhere in the range (10s - 11.5s).
    type.v3.Percent jitter = 3;

    // The concurrency limit set while measuring the minRTT. Defaults to 3.
    google.protobuf.UInt32Value min_concurrency = 4 [(validate.rules).uint32 = {gt: 0}];

    // Amount added to the measured minRTT to add stability to the concurrency limit during natural
    // variability in latency. This is expressed as a percentage of the measured value and can be
    // adjusted to allow more or less tolerance to the sampled latency values.
    //
    // Defaults to 25%.
    type.v3.Percent buffer = 5;
  }

  // The percentile to use when summarizing aggregated samples. Defaults to p50.
  type.v3.Percent sample_aggregate_percentile = 1;

  ConcurrencyLimitCalculationParams concurrency_limit_params = 2
      [(validate.rules).message = {required: true}];

  MinimumRTTCalculationParams min_rtt_calc_params = 3 [(validate.rules).message = {required: true}];
}

message AdaptiveConcurrency {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.adaptive_concurrency.v2alpha.AdaptiveConcurrency";

  oneof concurrency_controller_config {
    option (validate.required) = true;

    // Gradient concurrency control will be used.
    GradientControllerConfig gradient_controller_config = 1
        [(validate.rules).message = {required: true}];
  }

  // If set to false, the adaptive concurrency filter will operate as a pass-through filter. If the
  // message is unspecified, the filter will be enabled.
  config.core.v3.RuntimeFeatureFlag enabled = 2;
}

================================================
FILE: api/envoy/extensions/filters/http/admission_control/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/admission_control/v3alpha/admission_control.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.admission_control.v3alpha; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/range.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "google/rpc/status.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.admission_control.v3alpha"; option java_outer_classname = "AdmissionControlProto"; option java_multiple_files = true; option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Admission Control] // [#extension: envoy.filters.http.admission_control] // [#next-free-field: 6] message AdmissionControl { // Default method of specifying what constitutes a successful request. All status codes that // indicate a successful request must be explicitly specified if not relying on the default // values. message SuccessCriteria { message HttpCriteria { // Status code ranges that constitute a successful request. Configurable codes are in the // range [100, 600). repeated type.v3.Int32Range http_success_status = 1 [(validate.rules).repeated = {min_items: 1}]; } message GrpcCriteria { // Status codes that constitute a successful request. // Mappings can be found at: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. 
repeated uint32 grpc_success_status = 1 [(validate.rules).repeated = {min_items: 1}]; } // If HTTP criteria are unspecified, all HTTP status codes below 500 are treated as successful // responses. // // .. note:: // // The default HTTP codes considered successful by the admission controller are done so due // to the unlikelihood that sending fewer requests would change their behavior (for example: // redirects, unauthorized access, or bad requests won't be alleviated by sending less // traffic). HttpCriteria http_criteria = 1; // GRPC status codes to consider as request successes. If unspecified, defaults to: Ok, // Cancelled, Unknown, InvalidArgument, NotFound, AlreadyExists, Unauthenticated, // FailedPrecondition, OutOfRange, PermissionDenied, and Unimplemented. // // .. note:: // // The default gRPC codes that are considered successful by the admission controller are // chosen because of the unlikelihood that sending fewer requests will change the behavior. GrpcCriteria grpc_criteria = 2; } // If set to false, the admission control filter will operate as a pass-through filter. If the // message is unspecified, the filter will be enabled. config.core.v3.RuntimeFeatureFlag enabled = 1; // Defines how a request is considered a success/failure. oneof evaluation_criteria { option (validate.required) = true; SuccessCriteria success_criteria = 2; } // The sliding time window over which the success rate is calculated. The window is rounded to the // nearest second. Defaults to 30s. google.protobuf.Duration sampling_window = 3; // Rejection probability is defined by the formula:: // // max(0, (rq_count - rq_success_count / sr_threshold) / (rq_count + 1)) ^ (1 / aggression) // // The aggression dictates how heavily the admission controller will throttle requests upon SR // dropping at or below the threshold. A value of 1 will result in a linear increase in // rejection probability as SR drops. Any values less than 1.0, will be set to 1.0. 
If the // message is unspecified, the aggression is 1.0. See `the admission control documentation // `_ // for a diagram illustrating this. config.core.v3.RuntimeDouble aggression = 4; // Dictates the success rate at which the rejection probability is non-zero. As success rate drops // below this threshold, rejection probability will increase. Any success rate above the threshold // results in a rejection probability of 0. Defaults to 95%. config.core.v3.RuntimePercent sr_threshold = 5; } ================================================ FILE: api/envoy/extensions/filters/http/aws_lambda/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/aws_lambda/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/aws_lambda/v3/aws_lambda.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.aws_lambda.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_lambda.v3"; option java_outer_classname = "AwsLambdaProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: AWS Lambda] // AWS Lambda :ref:`configuration overview `. // [#extension: envoy.filters.http.aws_lambda] // AWS Lambda filter config message Config { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.aws_lambda.v2alpha.Config"; enum InvocationMode { // This is the more common mode of invocation, in which Lambda responds after it has completed the function. 
    // In this mode the output of the Lambda function becomes the response of the HTTP request.
    SYNCHRONOUS = 0;

    // In this mode Lambda responds immediately but continues to process the function
    // asynchronously. This mode can be used to signal events for example. In this mode, Lambda
    // responds with an acknowledgment that it received the call which is translated to an HTTP
    // 200 OK by the filter.
    ASYNCHRONOUS = 1;
  }

  // The ARN of the AWS Lambda to invoke when the filter is engaged
  // Must be in the following format:
  // arn::lambda:::function:
  string arn = 1 [(validate.rules).string = {min_len: 1}];

  // Whether to transform the request (headers and body) to a JSON payload or pass it as is.
  bool payload_passthrough = 2;

  // Determines the way to invoke the Lambda function.
  InvocationMode invocation_mode = 3 [(validate.rules).enum = {defined_only: true}];
}

// Per-route configuration for AWS Lambda. This can be useful when invoking a different Lambda
// function or a different version of the same Lambda depending on the route.
message PerRouteConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.aws_lambda.v2alpha.PerRouteConfig";

  Config invoke_config = 1;
}

================================================
FILE: api/envoy/extensions/filters/http/aws_request_signing/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/aws_request_signing/v2alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/aws_request_signing/v3/aws_request_signing.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.aws_request_signing.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.aws_request_signing.v3";
option java_outer_classname = "AwsRequestSigningProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: AwsRequestSigning]
// AwsRequestSigning :ref:`configuration overview `.
// [#extension: envoy.filters.http.aws_request_signing]

// Top level configuration for the AWS request signing filter.
message AwsRequestSigning {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.aws_request_signing.v2alpha.AwsRequestSigning";

  // The `service namespace
  // `_
  // of the HTTP endpoint.
  //
  // Example: s3
  string service_name = 1 [(validate.rules).string = {min_len: 1}];

  // The `region `_ hosting the HTTP
  // endpoint.
  //
  // Example: us-west-2
  string region = 2 [(validate.rules).string = {min_len: 1}];

  // Indicates that before signing headers, the host header will be swapped with
  // this value. If not set or empty, the original host header value
  // will be used and no rewrite will happen.
  //
  // Note: this rewrite affects both signing and host header forwarding. However, this
  // option shouldn't be used with
  // :ref:`HCM host rewrite ` given that the
  // value set here would be used for signing whereas the value set in the HCM would be used
  // for host header forwarding which is not the desired outcome.
  string host_rewrite = 3;
}

================================================
FILE: api/envoy/extensions/filters/http/buffer/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/buffer/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/buffer/v3/buffer.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.buffer.v3;

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.buffer.v3";
option java_outer_classname = "BufferProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Buffer]
// Buffer :ref:`configuration overview `.
// [#extension: envoy.filters.http.buffer]

message Buffer {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.buffer.v2.Buffer";

  // Field 2 was removed in a prior revision; the number is reserved to prevent
  // accidental reuse.
  reserved 2;

  // The maximum request size that the filter will buffer before the connection
  // manager will stop buffering and return a 413 response.
google.protobuf.UInt32Value max_request_bytes = 1
      [(validate.rules).uint32 = {gt: 0}, (validate.rules).message = {required: true}];
}

message BufferPerRoute {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.buffer.v2.BufferPerRoute";

  oneof override {
    option (validate.required) = true;

    // Disable the buffer filter for this particular vhost or route.
    bool disabled = 1 [(validate.rules).bool = {const: true}];

    // Override the global configuration of the filter with this new config.
    Buffer buffer = 2 [(validate.rules).message = {required: true}];
  }
}

================================================
FILE: api/envoy/extensions/filters/http/cache/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/cache/v2alpha:pkg",
        "//envoy/config/route/v3:pkg",
        "//envoy/type/matcher/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/cache/v3alpha/cache.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.cache.v3alpha;

import "envoy/config/route/v3/route_components.proto";
import "envoy/type/matcher/v3/string.proto";

import "google/protobuf/any.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v3alpha";
option java_outer_classname = "CacheProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: HTTP Cache Filter]
// [#extension: envoy.filters.http.cache]
message CacheConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.cache.v2alpha.CacheConfig";

  // [#not-implemented-hide:]
  // Modifies cache key creation by restricting which parts of the URL are included.
  message KeyCreatorParams {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.filter.http.cache.v2alpha.CacheConfig.KeyCreatorParams";

    // If true, exclude the URL scheme from the cache key. Set to true if your origins always
    // produce the same response for http and https requests.
    bool exclude_scheme = 1;

    // If true, exclude the host from the cache key. Set to true if your origins' responses don't
    // ever depend on host.
    bool exclude_host = 2;

    // If *query_parameters_included* is nonempty, only query parameters matched
    // by one or more of its matchers are included in the cache key. Any other
    // query params will not affect cache lookup.
    repeated config.route.v3.QueryParameterMatcher query_parameters_included = 3;

    // If *query_parameters_excluded* is nonempty, query parameters matched by one
    // or more of its matchers are excluded from the cache key (even if also
    // matched by *query_parameters_included*), and will not affect cache lookup.
    repeated config.route.v3.QueryParameterMatcher query_parameters_excluded = 4;
  }

  // Config specific to the cache storage implementation.
  google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];

  // List of matching rules that defines allowed *Vary* headers.
  //
  // The *vary* response header holds a list of header names that affect the
  // contents of a response, as described by
  // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.
  //
  // During insertion, *allowed_vary_headers* acts as an allowlist: if a
  // response's *vary* header mentions any header names that aren't matched by any rules in
  // *allowed_vary_headers*, that response will not be cached.
  //
  // During lookup, *allowed_vary_headers* controls what request headers will be
  // sent to the cache storage implementation.
  repeated type.matcher.v3.StringMatcher allowed_vary_headers = 2;

  // [#not-implemented-hide:]
  //
  //
  // Modifies cache key creation by restricting which parts of the URL are included.
  KeyCreatorParams key_creator_params = 3;

  // [#not-implemented-hide:]
  //
  //
  // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache
  // storage implementation may have its own limit beyond which it will reject insertions).
  uint32 max_body_bytes = 4;
}

================================================
FILE: api/envoy/extensions/filters/http/cache/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/route/v4alpha:pkg",
        "//envoy/extensions/filters/http/cache/v3alpha:pkg",
        "//envoy/type/matcher/v4alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/cache/v4alpha/cache.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.cache.v4alpha;

import "envoy/config/route/v4alpha/route_components.proto";
import "envoy/type/matcher/v4alpha/string.proto";

import "google/protobuf/any.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.cache.v4alpha";
option java_outer_classname = "CacheProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: HTTP Cache Filter]
//
// [#extension: envoy.filters.http.cache]
message CacheConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.cache.v3alpha.CacheConfig";

  // [#not-implemented-hide:]
  // Modifies cache key creation by restricting which parts of the URL are included.
  message KeyCreatorParams {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.filters.http.cache.v3alpha.CacheConfig.KeyCreatorParams";

    // If true, exclude the URL scheme from the cache key. Set to true if your origins always
    // produce the same response for http and https requests.
    bool exclude_scheme = 1;

    // If true, exclude the host from the cache key. Set to true if your origins' responses don't
    // ever depend on host.
    bool exclude_host = 2;

    // If *query_parameters_included* is nonempty, only query parameters matched
    // by one or more of its matchers are included in the cache key. Any other
    // query params will not affect cache lookup.
    repeated config.route.v4alpha.QueryParameterMatcher query_parameters_included = 3;

    // If *query_parameters_excluded* is nonempty, query parameters matched by one
    // or more of its matchers are excluded from the cache key (even if also
    // matched by *query_parameters_included*), and will not affect cache lookup.
    repeated config.route.v4alpha.QueryParameterMatcher query_parameters_excluded = 4;
  }

  // Config specific to the cache storage implementation.
  google.protobuf.Any typed_config = 1 [(validate.rules).any = {required: true}];

  // List of matching rules that defines allowed *Vary* headers.
  //
  // The *vary* response header holds a list of header names that affect the
  // contents of a response, as described by
  // https://httpwg.org/specs/rfc7234.html#caching.negotiated.responses.
  //
  // During insertion, *allowed_vary_headers* acts as an allowlist: if a
  // response's *vary* header mentions any header names that aren't matched by any rules in
  // *allowed_vary_headers*, that response will not be cached.
  //
  // During lookup, *allowed_vary_headers* controls what request headers will be
  // sent to the cache storage implementation.
  repeated type.matcher.v4alpha.StringMatcher allowed_vary_headers = 2;

  // [#not-implemented-hide:]
  //
  //
  // Modifies cache key creation by restricting which parts of the URL are included.
  KeyCreatorParams key_creator_params = 3;

  // [#not-implemented-hide:]
  //
  //
  // Max body size the cache filter will insert into a cache. 0 means unlimited (though the cache
  // storage implementation may have its own limit beyond which it will reject insertions).
  uint32 max_body_bytes = 4;
}

================================================
FILE: api/envoy/extensions/filters/http/cdn_loop/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/filters/http/cdn_loop/v3alpha/cdn_loop.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.cdn_loop.v3alpha;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.cdn_loop.v3alpha";
option java_outer_classname = "CdnLoopProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: HTTP CDN-Loop Filter]
// [#extension: envoy.filters.http.cdn_loop]

// CDN-Loop Header filter config. See the :ref:`configuration overview
// ` for more information.
message CdnLoopConfig {
  // The CDN identifier to use for loop checks and to append to the
  // CDN-Loop header.
  //
  // RFC 8586 calls this the cdn-id. The cdn-id can either be a
  // pseudonym or hostname the CDN is in control of.
  //
  // cdn_id must not be empty.
  string cdn_id = 1 [(validate.rules).string = {min_len: 1}];

  // The maximum allowed count of cdn_id in the downstream CDN-Loop
  // request header.
  //
  // The default of 0 means a request can transit the CdnLoopFilter
  // once. A value of 1 means that a request can transit the
  // CdnLoopFilter twice and so on.
  uint32 max_allowed_occurrences = 2;
}

================================================
FILE: api/envoy/extensions/filters/http/compressor/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/config/filter/http/compressor/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/compressor/v3/compressor.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.compressor.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/extension.proto";

import "google/protobuf/any.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.compressor.v3";
option java_outer_classname = "CompressorProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Compressor]
// Compressor :ref:`configuration overview `.
// [#extension: envoy.filters.http.compressor]
// [#next-free-field: 7]
message Compressor {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.compressor.v2.Compressor";

  // Minimum response length, in bytes, which will trigger compression. The default value is 30.
  google.protobuf.UInt32Value content_length = 1;

  // Set of strings that allows specifying which mime-types yield compression; e.g.,
  // application/json, text/html, etc. When this field is not defined, compression will be applied
  // to the following mime-types: "application/javascript", "application/json",
  // "application/xhtml+xml", "image/svg+xml", "text/css", "text/html", "text/plain", "text/xml"
  // and their synonyms.
  repeated string content_type = 2;

  // If true, disables compression when the response contains an etag header. When it is false, the
  // filter will preserve weak etags and remove the ones that require strong validation.
  bool disable_on_etag_header = 3;

  // If true, removes accept-encoding from the request headers before dispatching it to the upstream
  // so that responses do not get compressed before reaching the filter.
  //
  // .. attention:
  //
  //    To avoid interfering with other compression filters in the same chain use this option in
  //    the filter closest to the upstream.
  bool remove_accept_encoding_header = 4;

  // Runtime flag that controls whether the filter is enabled or not. If set to false, the
  // filter will operate as a pass-through filter. If not specified, defaults to enabled.
  config.core.v3.RuntimeFeatureFlag runtime_enabled = 5;

  // A compressor library to use for compression. Currently only
  // :ref:`envoy.compression.gzip.compressor`
  // is included in Envoy.
  // This field is ignored if used in the context of the gzip http-filter, but is mandatory
  // otherwise.
  config.core.v3.TypedExtensionConfig compressor_library = 6;
}

================================================
FILE: api/envoy/extensions/filters/http/cors/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/cors/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/cors/v3/cors.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.cors.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.cors.v3";
option java_outer_classname = "CorsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Cors]
// CORS Filter :ref:`configuration overview `.
// [#extension: envoy.filters.http.cors]

// Cors filter config. Intentionally empty: the filter's per-route policy is
// configured elsewhere; this message only marks the filter as enabled.
message Cors {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.cors.v2.Cors";
}

================================================
FILE: api/envoy/extensions/filters/http/csrf/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/csrf/v2:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/csrf/v3/csrf.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.csrf.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/matcher/v3/string.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v3"; option java_outer_classname = "CsrfProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. // [#extension: envoy.filters.http.csrf] // CSRF filter config. message CsrfPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.csrf.v2.CsrfPolicy"; // Specifies the % of requests for which the CSRF filter is enabled. // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests to filter. // // .. note:: // // This field defaults to 100/:ref:`HUNDRED // `. config.core.v3.RuntimeFractionalPercent filter_enabled = 1 [(validate.rules).message = {required: true}]; // Specifies that CSRF policies will be evaluated and tracked, but not enforced. // // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. 
// // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate // and track the request's *Origin* and *Destination* to determine if it's valid, but will not // enforce any policies. config.core.v3.RuntimeFractionalPercent shadow_enabled = 2; // Specifies additional source origins that will be allowed in addition to // the destination origin. // // More information on how this can be configured via runtime can be found // :ref:`here `. repeated type.matcher.v3.StringMatcher additional_origins = 3; } ================================================ FILE: api/envoy/extensions/filters/http/csrf/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/csrf/v4alpha/csrf.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.csrf.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.csrf.v4alpha"; option java_outer_classname = "CsrfProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: CSRF] // Cross-Site Request Forgery :ref:`configuration overview `. // [#extension: envoy.filters.http.csrf] // CSRF filter config. 
message CsrfPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.csrf.v3.CsrfPolicy"; // Specifies the % of requests for which the CSRF filter is enabled. // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests to filter. // // .. note:: // // This field defaults to 100/:ref:`HUNDRED // `. config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 1 [(validate.rules).message = {required: true}]; // Specifies that CSRF policies will be evaluated and tracked, but not enforced. // // This is intended to be used when ``filter_enabled`` is off and will be ignored otherwise. // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests for which it will evaluate // and track the request's *Origin* and *Destination* to determine if it's valid, but will not // enforce any policies. config.core.v4alpha.RuntimeFractionalPercent shadow_enabled = 2; // Specifies additional source origins that will be allowed in addition to // the destination origin. // // More information on how this can be configured via runtime can be found // :ref:`here `. repeated type.matcher.v4alpha.StringMatcher additional_origins = 3; } ================================================ FILE: api/envoy/extensions/filters/http/decompressor/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/decompressor/v3/decompressor.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.decompressor.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/extension.proto"; import "google/protobuf/any.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.decompressor.v3"; option java_outer_classname = "DecompressorProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Decompressor] // [#extension: envoy.filters.http.decompressor] message Decompressor { // Common configuration for filter behavior on both the request and response direction. message CommonDirectionConfig { // Runtime flag that controls whether the filter is enabled for decompression or not. If set to false, the // filter will operate as a pass-through filter. If the message is unspecified, the filter will be enabled. config.core.v3.RuntimeFeatureFlag enabled = 1; } // Configuration for filter behavior on the request direction. message RequestDirectionConfig { CommonDirectionConfig common_config = 1; // If set to true, and response decompression is enabled, the filter modifies the Accept-Encoding // request header by appending the decompressor_library's encoding. Defaults to true. google.protobuf.BoolValue advertise_accept_encoding = 2; } // Configuration for filter behavior on the response direction. 
message ResponseDirectionConfig { CommonDirectionConfig common_config = 1; } // A decompressor library to use for both request and response decompression. Currently only // :ref:`envoy.compression.gzip.compressor` // is included in Envoy. config.core.v3.TypedExtensionConfig decompressor_library = 1 [(validate.rules).message = {required: true}]; // Configuration for request decompression. Decompression is enabled by default if left empty. RequestDirectionConfig request_direction_config = 2; // Configuration for response decompression. Decompression is enabled by default if left empty. ResponseDirectionConfig response_direction_config = 3; } ================================================ FILE: api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/dynamic_forward_proxy/v3/dynamic_forward_proxy.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.dynamic_forward_proxy.v3; import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamic_forward_proxy.v3"; option java_outer_classname = "DynamicForwardProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamic forward proxy] // Configuration for the dynamic 
forward proxy HTTP filter. See the :ref:`architecture overview // ` for more information. // [#extension: envoy.filters.http.dynamic_forward_proxy] message FilterConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.dynamic_forward_proxy.v2alpha.FilterConfig"; // The DNS cache configuration that the filter will attach to. Note this configuration must // match that of associated :ref:`dynamic forward proxy cluster configuration // `. common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1 [(validate.rules).message = {required: true}]; } // Per route Configuration for the dynamic forward proxy HTTP filter. message PerRouteConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.dynamic_forward_proxy.v2alpha.PerRouteConfig"; oneof host_rewrite_specifier { // Indicates that before DNS lookup, the host header will be swapped with // this value. If not set or empty, the original host header value // will be used and no rewrite will happen. // // Note: this rewrite affects both DNS lookup and host header forwarding. However, this // option shouldn't be used with // :ref:`HCM host rewrite ` given that the // value set here would be used for DNS lookups whereas the value set in the HCM would be used // for host header forwarding which is not the desired outcome. string host_rewrite_literal = 1; // Indicates that before DNS lookup, the host header will be swapped with // the value of this header. If not set or empty, the original host header // value will be used and no rewrite will happen. // // Note: this rewrite affects both DNS lookup and host header forwarding. However, this // option shouldn't be used with // :ref:`HCM host rewrite header ` // given that the value set here would be used for DNS lookups whereas the value set in the HCM // would be used for host header forwarding which is not the desired outcome. 
string host_rewrite_header = 2; } } ================================================ FILE: api/envoy/extensions/filters/http/dynamo/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/dynamo/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/dynamo/v3/dynamo.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.dynamo.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.dynamo.v3"; option java_outer_classname = "DynamoProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dynamo] // Dynamo :ref:`configuration overview `. // [#extension: envoy.filters.http.dynamo] // Dynamo filter config. message Dynamo { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.dynamo.v2.Dynamo"; } ================================================ FILE: api/envoy/extensions/filters/http/ext_authz/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/ext_authz/v2:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/ext_authz/v3/ext_authz.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.ext_authz.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/config/core/v3/http_uri.proto"; import "envoy/type/matcher/v3/metadata.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/http_status.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v3"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] // [#next-free-field: 15] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthz"; reserved 4; reserved "use_alpha"; // External authorization service configuration. oneof services { // gRPC service configuration (default timeout: 200ms). config.core.v3.GrpcService grpc_service = 1; // HTTP service configuration (default timeout: 200ms). HttpService http_service = 3; } // API version for ext_authz transport protocol. 
This describes the ext_authz gRPC endpoint and // version of messages used on the wire. config.core.v3.ApiVersion transport_api_version = 12 [(validate.rules).enum = {defined_only: true}]; // Changes filter's behaviour on errors: // // 1. When set to true, the filter will *accept* client request even if the communication with // the authorization service has failed, or if the authorization service has returned a HTTP 5xx // error. // // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* // response if the communication with the authorization service has failed, or if the // authorization service has returned a HTTP 5xx error. // // Note that errors can be *always* tracked in the :ref:`stats // `. bool failure_mode_allow = 2; // Enables filter to buffer the client request body and send it within the authorization request. // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization // request message indicating if the body data is partial. BufferSettings with_request_body = 5; // Clears route cache in order to allow the external authorization service to correctly affect // routing decisions. Filter clears all cached routes when: // // 1. The field is set to *true*. // // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. // // 3. At least one *authorization response header* is added to the client request, or is used for // altering another client request header. // bool clear_route_cache = 6; // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. type.v3.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. 
// // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata // ` is set, // then the following will pass the jwt payload to the authorization server. // // .. code-block:: yaml // // metadata_context_namespaces: // - envoy.filters.http.jwt_authn // repeated string metadata_context_namespaces = 8; // Specifies if the filter is enabled. // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests to filter. // // If this field is not specified, the filter will be enabled for all requests. config.core.v3.RuntimeFractionalPercent filter_enabled = 9; // Specifies if the filter is enabled with metadata matcher. // If this field is not specified, the filter will be enabled for all requests. type.matcher.v3.MetadataMatcher filter_enabled_metadata = 14; // Specifies whether to deny the requests, when the filter is disabled. // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to determine whether to deny request for // filter protected path at filter disabling. If filter is disabled in // typed_per_filter_config for the path, requests will not be denied. // // If this field is not specified, all requests will be allowed when disabled. config.core.v3.RuntimeFeatureFlag deny_at_disable = 11; // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 10; // Optional additional prefix to use when emitting statistics. This allows to distinguish // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example: // // .. code-block:: yaml // // http_filters: // - name: envoy.filters.http.ext_authz // typed_config: // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. 
// - name: envoy.filters.http.ext_authz // typed_config: // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. // string stat_prefix = 13; } // Configuration for buffering the request data. message BufferSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.BufferSettings"; // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow // `. uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. // The authorization request will be dispatched and no 413 HTTP error will be returned by the // filter. bool allow_partial_message = 2; // If true, the body sent to the external authorization service is set with raw bytes, it sets // the :ref:`raw_body` // field of HTTP request attribute context. Otherwise, :ref:` // body` will be filled // with UTF-8 string request body. bool pack_as_bytes = 3; } // HttpService is used for raw HTTP communication between the filter and the authorization service. // When configured, the filter will parse the client request and use these attributes to call the // authorization server. Depending on the response, the filter may reject or accept the client // request. Note that in any of these events, metadata can be added, removed or overridden by the // filter: // // *On authorization request*, a list of allowed request headers may be supplied. See // :ref:`allowed_headers // ` // for details. Additional headers metadata may be added to the authorization request. See // :ref:`headers_to_add // ` for // details. 
// // On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and // additional headers metadata may be added to the original client request. See // :ref:`allowed_upstream_headers // ` // for details. // // On other authorization response statuses, the filter will not allow traffic. Additional headers // metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers // ` // for details. // [#next-free-field: 9] message HttpService { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.HttpService"; reserved 3, 4, 5, 6; // Sets the HTTP server URI which the authorization requests must be sent to. config.core.v3.HttpUri server_uri = 1; // Sets a prefix to the value of authorization request header *Path*. string path_prefix = 2; // Settings used for controlling authorization request metadata. AuthorizationRequest authorization_request = 7; // Settings used for controlling authorization response metadata. AuthorizationResponse authorization_response = 8; } message AuthorizationRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.AuthorizationRequest"; // Authorization request will include the client request headers that have a correspondent match // in the :ref:`list `. Note that in addition to the // user's supplied matchers: // // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. // // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have // a message body. However, the authorization request can include the buffered client request body // (controlled by :ref:`with_request_body // ` setting), // consequently the value of *Content-Length* of the authorization request reflects the size of // its payload size. 
// type.matcher.v3.ListStringMatcher allowed_headers = 1; // Sets a list of headers that will be included to the request to authorization service. Note that // client request of the same key will be overridden. repeated config.core.v3.HeaderValue headers_to_add = 2; } message AuthorizationResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.AuthorizationResponse"; // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the original client request. // Note that coexistent headers will be overridden. type.matcher.v3.ListStringMatcher allowed_upstream_headers = 1; // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that coexistent headers will be appended. type.matcher.v3.ListStringMatcher allowed_upstream_headers_to_append = 3; // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority // (Host)* will be in the response to the client. When a header is included in this list, *Path*, // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. type.matcher.v3.ListStringMatcher allowed_client_headers = 2; } // Extra settings on a per virtualhost/route/weighted-cluster level. message ExtAuthzPerRoute { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.ExtAuthzPerRoute"; oneof override { option (validate.required) = true; // Disable the ext auth filter for this particular vhost or route. // If disabled is specified in multiple per-filter-configs, the most specific one will be used. bool disabled = 1 [(validate.rules).bool = {const: true}]; // Check request settings for this route. 
CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; } } // Extra settings for the check request. message CheckSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ext_authz.v2.CheckSettings"; // Context extensions to set on the CheckRequest's // :ref:`AttributeContext.context_extensions` // // You can use this to provide extra context for the external authorization server on specific // virtual hosts/routes. For example, adding a context extension on the virtual host level can // give the ext-authz server information on what virtual host is used without needing to parse the // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged // in order, and the result will be used. // // Merge semantics for this field are such that keys from more specific configs override. // // .. note:: // // These settings are only applied to a filter configured with a // :ref:`grpc_service`. map context_extensions = 1; // When set to true, disable the configured :ref:`with_request_body // ` for a route. bool disable_request_body_buffering = 2; } ================================================ FILE: api/envoy/extensions/filters/http/ext_authz/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/ext_authz/v4alpha/ext_authz.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.ext_authz.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/config/core/v4alpha/http_uri.proto"; import "envoy/type/matcher/v4alpha/metadata.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/v3/http_status.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ext_authz.v4alpha"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: External Authorization] // External Authorization :ref:`configuration overview `. // [#extension: envoy.filters.http.ext_authz] // [#next-free-field: 15] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.ExtAuthz"; reserved 4; reserved "use_alpha"; // External authorization service configuration. oneof services { // gRPC service configuration (default timeout: 200ms). config.core.v4alpha.GrpcService grpc_service = 1; // HTTP service configuration (default timeout: 200ms). 
HttpService http_service = 3; } // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and // version of messages used on the wire. config.core.v4alpha.ApiVersion transport_api_version = 12 [(validate.rules).enum = {defined_only: true}]; // Changes filter's behaviour on errors: // // 1. When set to true, the filter will *accept* client request even if the communication with // the authorization service has failed, or if the authorization service has returned a HTTP 5xx // error. // // 2. When set to false, ext-authz will *reject* client requests and return a *Forbidden* // response if the communication with the authorization service has failed, or if the // authorization service has returned a HTTP 5xx error. // // Note that errors can be *always* tracked in the :ref:`stats // `. bool failure_mode_allow = 2; // Enables filter to buffer the client request body and send it within the authorization request. // A ``x-envoy-auth-partial-body: false|true`` metadata header will be added to the authorization // request message indicating if the body data is partial. BufferSettings with_request_body = 5; // Clears route cache in order to allow the external authorization service to correctly affect // routing decisions. Filter clears all cached routes when: // // 1. The field is set to *true*. // // 2. The status returned from the authorization service is a HTTP 200 or gRPC 0. // // 3. At least one *authorization response header* is added to the client request, or is used for // altering another client request header. // bool clear_route_cache = 6; // Sets the HTTP status that is returned to the client when there is a network error between the // filter and the authorization server. The default status is HTTP 403 Forbidden. type.v3.HttpStatus status_on_error = 7; // Specifies a list of metadata namespaces whose values, if present, will be passed to the // ext_authz service as an opaque *protobuf::Struct*. 
// // For example, if the *jwt_authn* filter is used and :ref:`payload_in_metadata // ` is set, // then the following will pass the jwt payload to the authorization server. // // .. code-block:: yaml // // metadata_context_namespaces: // - envoy.filters.http.jwt_authn // repeated string metadata_context_namespaces = 8; // Specifies if the filter is enabled. // // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to get the percentage of requests to filter. // // If this field is not specified, the filter will be enabled for all requests. config.core.v4alpha.RuntimeFractionalPercent filter_enabled = 9; // Specifies if the filter is enabled with metadata matcher. // If this field is not specified, the filter will be enabled for all requests. type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 14; // Specifies whether to deny the requests, when the filter is disabled. // If :ref:`runtime_key ` is specified, // Envoy will lookup the runtime key to determine whether to deny request for // filter protected path at filter disabling. If filter is disabled in // typed_per_filter_config for the path, requests will not be denied. // // If this field is not specified, all requests will be allowed when disabled. config.core.v4alpha.RuntimeFeatureFlag deny_at_disable = 11; // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 10; // Optional additional prefix to use when emitting statistics. This allows to distinguish // emitted statistics between configured *ext_authz* filters in an HTTP filter chain. For example: // // .. code-block:: yaml // // http_filters: // - name: envoy.filters.http.ext_authz // typed_config: // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz // stat_prefix: waf # This emits ext_authz.waf.ok, ext_authz.waf.denied, etc. 
// - name: envoy.filters.http.ext_authz // typed_config: // "@type": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz // stat_prefix: blocker # This emits ext_authz.blocker.ok, ext_authz.blocker.denied, etc. // string stat_prefix = 13; } // Configuration for buffering the request data. message BufferSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.BufferSettings"; // Sets the maximum size of a message body that the filter will hold in memory. Envoy will return // *HTTP 413* and will *not* initiate the authorization process when buffer reaches the number // set in this field. Note that this setting will have precedence over :ref:`failure_mode_allow // `. uint32 max_request_bytes = 1 [(validate.rules).uint32 = {gt: 0}]; // When this field is true, Envoy will buffer the message until *max_request_bytes* is reached. // The authorization request will be dispatched and no 413 HTTP error will be returned by the // filter. bool allow_partial_message = 2; // If true, the body sent to the external authorization service is set with raw bytes, it sets // the :ref:`raw_body` // field of HTTP request attribute context. Otherwise, :ref:` // body` will be filled // with UTF-8 string request body. bool pack_as_bytes = 3; } // HttpService is used for raw HTTP communication between the filter and the authorization service. // When configured, the filter will parse the client request and use these attributes to call the // authorization server. Depending on the response, the filter may reject or accept the client // request. Note that in any of these events, metadata can be added, removed or overridden by the // filter: // // *On authorization request*, a list of allowed request headers may be supplied. See // :ref:`allowed_headers // ` // for details. Additional headers metadata may be added to the authorization request. See // :ref:`headers_to_add // ` for // details. 
// // On authorization response status HTTP 200 OK, the filter will allow traffic to the upstream and // additional headers metadata may be added to the original client request. See // :ref:`allowed_upstream_headers // ` // for details. // // On other authorization response statuses, the filter will not allow traffic. Additional headers // metadata as well as body may be added to the client's response. See :ref:`allowed_client_headers // ` // for details. // [#next-free-field: 9] message HttpService { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.HttpService"; reserved 3, 4, 5, 6; // Sets the HTTP server URI which the authorization requests must be sent to. config.core.v4alpha.HttpUri server_uri = 1; // Sets a prefix to the value of authorization request header *Path*. string path_prefix = 2; // Settings used for controlling authorization request metadata. AuthorizationRequest authorization_request = 7; // Settings used for controlling authorization response metadata. AuthorizationResponse authorization_response = 8; } message AuthorizationRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.AuthorizationRequest"; // Authorization request will include the client request headers that have a correspondent match // in the :ref:`list `. Note that in addition to the // user's supplied matchers: // // 1. *Host*, *Method*, *Path* and *Content-Length* are automatically included to the list. // // 2. *Content-Length* will be set to 0 and the request to the authorization service will not have // a message body. However, the authorization request can include the buffered client request body // (controlled by :ref:`with_request_body // ` setting), // consequently the value of *Content-Length* of the authorization request reflects the size of // its payload size. 
// type.matcher.v4alpha.ListStringMatcher allowed_headers = 1; // Sets a list of headers that will be included to the request to authorization service. Note that // client request of the same key will be overridden. repeated config.core.v4alpha.HeaderValue headers_to_add = 2; } message AuthorizationResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.AuthorizationResponse"; // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the original client request. // Note that coexistent headers will be overridden. type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers = 1; // When this :ref:`list ` is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that coexistent headers will be appended. type.matcher.v4alpha.ListStringMatcher allowed_upstream_headers_to_append = 3; // When this :ref:`list `. is set, authorization // response headers that have a correspondent match will be added to the client's response. Note // that when this list is *not* set, all the authorization response headers, except *Authority // (Host)* will be in the response to the client. When a header is included in this list, *Path*, // *Status*, *Content-Length*, *WWWAuthenticate* and *Location* are automatically added. type.matcher.v4alpha.ListStringMatcher allowed_client_headers = 2; } // Extra settings on a per virtualhost/route/weighted-cluster level. message ExtAuthzPerRoute { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute"; oneof override { option (validate.required) = true; // Disable the ext auth filter for this particular vhost or route. // If disabled is specified in multiple per-filter-configs, the most specific one will be used. 
bool disabled = 1 [(validate.rules).bool = {const: true}]; // Check request settings for this route. CheckSettings check_settings = 2 [(validate.rules).message = {required: true}]; } } // Extra settings for the check request. message CheckSettings { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.ext_authz.v3.CheckSettings"; // Context extensions to set on the CheckRequest's // :ref:`AttributeContext.context_extensions` // // You can use this to provide extra context for the external authorization server on specific // virtual hosts/routes. For example, adding a context extension on the virtual host level can // give the ext-authz server information on what virtual host is used without needing to parse the // host header. If CheckSettings is specified in multiple per-filter-configs, they will be merged // in order, and the result will be used. // // Merge semantics for this field are such that keys from more specific configs override. // // .. note:: // // These settings are only applied to a filter configured with a // :ref:`grpc_service`. map context_extensions = 1; // When set to true, disable the configured :ref:`with_request_body // ` for a route. bool disable_request_body_buffering = 2; } ================================================ FILE: api/envoy/extensions/filters/http/fault/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/fault/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/fault/v3/fault.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.fault.v3; import "envoy/config/route/v3/route_components.proto"; import "envoy/extensions/filters/common/fault/v3/fault.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v3"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. // [#extension: envoy.filters.http.fault] // [#next-free-field: 6] message FaultAbort { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.FaultAbort"; // Fault aborts are controlled via an HTTP header (if applicable). See the // :ref:`HTTP fault filter ` documentation for // more information. message HeaderAbort { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.FaultAbort.HeaderAbort"; } reserved 1; oneof error_type { option (validate.required) = true; // HTTP status code to use to abort the HTTP request. uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; // gRPC status code to use to abort the gRPC request. 
uint32 grpc_status = 5; // Fault aborts are controlled via an HTTP header (if applicable). HeaderAbort header_abort = 4; } // The percentage of requests/operations/connections that will be aborted with the error code // provided. type.v3.FractionalPercent percentage = 3; } // [#next-free-field: 15] message HTTPFault { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.fault.v2.HTTPFault"; // If specified, the filter will inject delays based on the values in the // object. common.fault.v3.FaultDelay delay = 1; // If specified, the filter will abort requests based on the values in // the object. At least *abort* or *delay* must be specified. FaultAbort abort = 2; // Specifies the name of the (destination) upstream cluster that the // filter should match on. Fault injection will be restricted to requests // bound to the specific upstream cluster. string upstream_cluster = 3; // Specifies a set of headers that the filter should match on. The fault // injection filter can be applied selectively to requests that match a set of // headers specified in the fault filter config. The chances of actual fault // injection further depend on the value of the :ref:`percentage // ` field. // The filter will check the request's headers against all the specified // headers in the filter config. A match will happen if all the headers in the // config are present in the request with the same values (or based on // presence if the *value* field is not in the config). repeated config.route.v3.HeaderMatcher headers = 4; // Faults are injected for the specified list of downstream hosts. If this // setting is not set, faults are injected for all downstream nodes. // Downstream node name is taken from :ref:`the HTTP // x-envoy-downstream-service-node // ` header and compared // against downstream_nodes list. repeated string downstream_nodes = 5; // The maximum number of faults that can be active at a single time via the configured fault // filter. 
Note that because this setting can be overridden at the route level, it's possible // for the number of active faults to be greater than this value (if injected via a different // route). If not specified, defaults to unlimited. This setting can be overridden via // `runtime ` and any faults that are not injected // due to overflow will be indicated via the `faults_overflow // ` stat. // // .. attention:: // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy // limit. It's possible for the number of active faults to rise slightly above the configured // amount due to the implementation details. google.protobuf.UInt32Value max_active_faults = 6; // The response rate limit to be applied to the response body of the stream. When configured, // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent // ` runtime key. // // .. attention:: // This is a per-stream limit versus a connection level limit. This means that concurrent streams // will each get an independent limit. common.fault.v3.FaultRateLimit response_rate_limit = 7; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.delay.fixed_delay_percent string delay_percent_runtime = 8; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.abort.abort_percent string abort_percent_runtime = 9; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.delay.fixed_duration_ms string delay_duration_runtime = 10; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.abort.http_status string abort_http_status_runtime = 11; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.max_active_faults string max_active_faults_runtime = 12; // The runtime key to override the :ref:`default ` // runtime. 
The default is: fault.http.rate_limit.response_percent string response_rate_limit_percent_runtime = 13; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.abort.grpc_status string abort_grpc_status_runtime = 14; } ================================================ FILE: api/envoy/extensions/filters/http/fault/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/route/v4alpha:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/fault/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/fault/v4alpha/fault.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.fault.v4alpha; import "envoy/config/route/v4alpha/route_components.proto"; import "envoy/extensions/filters/common/fault/v3/fault.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.fault.v4alpha"; option java_outer_classname = "FaultProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Fault Injection] // Fault Injection :ref:`configuration overview `. // [#extension: envoy.filters.http.fault] // [#next-free-field: 6] message FaultAbort { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.fault.v3.FaultAbort"; // Fault aborts are controlled via an HTTP header (if applicable). 
See the // :ref:`HTTP fault filter ` documentation for // more information. message HeaderAbort { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.fault.v3.FaultAbort.HeaderAbort"; } reserved 1; oneof error_type { option (validate.required) = true; // HTTP status code to use to abort the HTTP request. uint32 http_status = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}]; // gRPC status code to use to abort the gRPC request. uint32 grpc_status = 5; // Fault aborts are controlled via an HTTP header (if applicable). HeaderAbort header_abort = 4; } // The percentage of requests/operations/connections that will be aborted with the error code // provided. type.v3.FractionalPercent percentage = 3; } // [#next-free-field: 15] message HTTPFault { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.fault.v3.HTTPFault"; // If specified, the filter will inject delays based on the values in the // object. common.fault.v3.FaultDelay delay = 1; // If specified, the filter will abort requests based on the values in // the object. At least *abort* or *delay* must be specified. FaultAbort abort = 2; // Specifies the name of the (destination) upstream cluster that the // filter should match on. Fault injection will be restricted to requests // bound to the specific upstream cluster. string upstream_cluster = 3; // Specifies a set of headers that the filter should match on. The fault // injection filter can be applied selectively to requests that match a set of // headers specified in the fault filter config. The chances of actual fault // injection further depend on the value of the :ref:`percentage // ` field. // The filter will check the request's headers against all the specified // headers in the filter config. A match will happen if all the headers in the // config are present in the request with the same values (or based on // presence if the *value* field is not in the config). 
repeated config.route.v4alpha.HeaderMatcher headers = 4; // Faults are injected for the specified list of downstream hosts. If this // setting is not set, faults are injected for all downstream nodes. // Downstream node name is taken from :ref:`the HTTP // x-envoy-downstream-service-node // ` header and compared // against downstream_nodes list. repeated string downstream_nodes = 5; // The maximum number of faults that can be active at a single time via the configured fault // filter. Note that because this setting can be overridden at the route level, it's possible // for the number of active faults to be greater than this value (if injected via a different // route). If not specified, defaults to unlimited. This setting can be overridden via // `runtime ` and any faults that are not injected // due to overflow will be indicated via the `faults_overflow // ` stat. // // .. attention:: // Like other :ref:`circuit breakers ` in Envoy, this is a fuzzy // limit. It's possible for the number of active faults to rise slightly above the configured // amount due to the implementation details. google.protobuf.UInt32Value max_active_faults = 6; // The response rate limit to be applied to the response body of the stream. When configured, // the percentage can be overridden by the :ref:`fault.http.rate_limit.response_percent // ` runtime key. // // .. attention:: // This is a per-stream limit versus a connection level limit. This means that concurrent streams // will each get an independent limit. common.fault.v3.FaultRateLimit response_rate_limit = 7; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.delay.fixed_delay_percent string delay_percent_runtime = 8; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.abort.abort_percent string abort_percent_runtime = 9; // The runtime key to override the :ref:`default ` // runtime. 
The default is: fault.http.delay.fixed_duration_ms string delay_duration_runtime = 10; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.abort.http_status string abort_http_status_runtime = 11; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.max_active_faults string max_active_faults_runtime = 12; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.rate_limit.response_percent string response_rate_limit_percent_runtime = 13; // The runtime key to override the :ref:`default ` // runtime. The default is: fault.http.abort.grpc_status string abort_grpc_status_runtime = 14; } ================================================ FILE: api/envoy/extensions/filters/http/grpc_http1_bridge/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/grpc_http1_bridge/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/grpc_http1_bridge/v3/config.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.grpc_http1_bridge.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_bridge.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC HTTP/1.1 Bridge] // gRPC HTTP/1.1 Bridge Filter :ref:`configuration overview `. // [#extension: envoy.filters.http.grpc_http1_bridge] // gRPC HTTP/1.1 Bridge filter config. 
message Config {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.grpc_http1_bridge.v2.Config";
}



================================================
FILE: api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3/config.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_http1_reverse_bridge.v3";
option java_outer_classname = "ConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: gRPC HTTP/1.1 Reverse Bridge]
// gRPC HTTP/1.1 Reverse Bridge :ref:`configuration overview
// `.
// [#extension: envoy.filters.http.grpc_http1_reverse_bridge]

// gRPC reverse bridge filter configuration
message FilterConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfig";

  // The content-type to pass to the upstream when the gRPC bridge filter is applied.
  // The filter will also validate that the upstream responds with the same content type.
  string content_type = 1 [(validate.rules).string = {min_len: 1}];

  // If true, Envoy will assume that the upstream doesn't understand gRPC frames and
  // strip the gRPC frame from the request, and add it back in to the response. This will
  // hide the gRPC semantics from the upstream, allowing it to receive and respond with a
  // simple binary encoded protobuf.
  bool withhold_grpc_frames = 2;
}

// gRPC reverse bridge filter configuration per virtualhost/route/weighted-cluster level.
message FilterConfigPerRoute {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.grpc_http1_reverse_bridge.v2alpha1.FilterConfigPerRoute";

  // If true, disables gRPC reverse bridge filter for this particular vhost or route.
  // If disabled is specified in multiple per-filter-configs, the most specific one will be used.
  bool disabled = 1;
}



================================================
FILE: api/envoy/extensions/filters/http/grpc_json_transcoder/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/transcoder/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/grpc_json_transcoder/v3/transcoder.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.grpc_json_transcoder.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_json_transcoder.v3"; option java_outer_classname = "TranscoderProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC-JSON transcoder] // gRPC-JSON transcoder :ref:`configuration overview `. // [#extension: envoy.filters.http.grpc_json_transcoder] // [#next-free-field: 10] message GrpcJsonTranscoder { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder"; message PrintOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.transcoder.v2.GrpcJsonTranscoder.PrintOptions"; // Whether to add spaces, line breaks and indentation to make the JSON // output easy to read. Defaults to false. bool add_whitespace = 1; // Whether to always print primitive fields. By default primitive // fields with default values will be omitted in JSON output. For // example, an int32 field set to 0 will be omitted. Setting this flag to // true will override the default behavior and print primitive fields // regardless of their values. Defaults to false. bool always_print_primitive_fields = 2; // Whether to always print enums as ints. By default they are rendered // as strings. Defaults to false. 
bool always_print_enums_as_ints = 3; // Whether to preserve proto field names. By default protobuf will // generate JSON field names using the ``json_name`` option, or lower camel case, // in that order. Setting this flag will preserve the original field names. Defaults to false. bool preserve_proto_field_names = 4; } oneof descriptor_set { option (validate.required) = true; // Supplies the filename of // :ref:`the proto descriptor set ` for the gRPC // services. string proto_descriptor = 1; // Supplies the binary content of // :ref:`the proto descriptor set ` for the gRPC // services. bytes proto_descriptor_bin = 4; } // A list of strings that // supplies the fully qualified service names (i.e. "package_name.service_name") that // the transcoder will translate. If the service name doesn't exist in ``proto_descriptor``, // Envoy will fail at startup. The ``proto_descriptor`` may contain more services than // the service names specified here, but they won't be translated. repeated string services = 2 [(validate.rules).repeated = {min_items: 1}]; // Control options for response JSON. These options are passed directly to // `JsonPrintOptions `_. PrintOptions print_options = 3; // Whether to keep the incoming request route after the outgoing headers have been transformed to // the match the upstream gRPC service. Note: This means that routes for gRPC services that are // not transcoded cannot be used in combination with *match_incoming_request_route*. bool match_incoming_request_route = 5; // A list of query parameters to be ignored for transcoding method mapping. // By default, the transcoder filter will not transcode a request if there are any // unknown/invalid query parameters. // // Example : // // .. 
code-block:: proto // // service Bookstore { // rpc GetShelf(GetShelfRequest) returns (Shelf) { // option (google.api.http) = { // get: "/shelves/{shelf}" // }; // } // } // // message GetShelfRequest { // int64 shelf = 1; // } // // message Shelf {} // // The request ``/shelves/100?foo=bar`` will not be mapped to ``GetShelf``` because variable // binding for ``foo`` is not defined. Adding ``foo`` to ``ignored_query_parameters`` will allow // the same request to be mapped to ``GetShelf``. repeated string ignored_query_parameters = 6; // Whether to route methods without the ``google.api.http`` option. // // Example : // // .. code-block:: proto // // package bookstore; // // service Bookstore { // rpc GetShelf(GetShelfRequest) returns (Shelf) {} // } // // message GetShelfRequest { // int64 shelf = 1; // } // // message Shelf {} // // The client could ``post`` a json body ``{"shelf": 1234}`` with the path of // ``/bookstore.Bookstore/GetShelfRequest`` to call ``GetShelfRequest``. bool auto_mapping = 7; // Whether to ignore query parameters that cannot be mapped to a corresponding // protobuf field. Use this if you cannot control the query parameters and do // not know them beforehand. Otherwise use ``ignored_query_parameters``. // Defaults to false. bool ignore_unknown_query_parameters = 8; // Whether to convert gRPC status headers to JSON. // When trailer indicates a gRPC error and there was no HTTP body, take ``google.rpc.Status`` // from the ``grpc-status-details-bin`` header and use it as JSON body. // If there was no such header, make ``google.rpc.Status`` out of the ``grpc-status`` and // ``grpc-message`` headers. // The error details types must be present in the ``proto_descriptor``. // // For example, if an upstream server replies with headers: // // .. 
code-block:: none // // grpc-status: 5 // grpc-status-details-bin: // CAUaMwoqdHlwZS5nb29nbGVhcGlzLmNvbS9nb29nbGUucnBjLlJlcXVlc3RJbmZvEgUKA3ItMQ // // The ``grpc-status-details-bin`` header contains a base64-encoded protobuf message // ``google.rpc.Status``. It will be transcoded into: // // .. code-block:: none // // HTTP/1.1 404 Not Found // content-type: application/json // // {"code":5,"details":[{"@type":"type.googleapis.com/google.rpc.RequestInfo","requestId":"r-1"}]} // // In order to transcode the message, the ``google.rpc.RequestInfo`` type from // the ``google/rpc/error_details.proto`` should be included in the configured // :ref:`proto descriptor set `. bool convert_grpc_status = 9; } ================================================ FILE: api/envoy/extensions/filters/http/grpc_stats/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/grpc_stats/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/grpc_stats/v3/config.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.grpc_stats.v3; import "envoy/config/core/v3/grpc_method_list.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_stats.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC statistics] gRPC statistics filter // :ref:`configuration overview `. 
// [#extension: envoy.filters.http.grpc_stats]

// gRPC statistics filter configuration
message FilterConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.grpc_stats.v2alpha.FilterConfig";

  // If true, the filter maintains a filter state object with the request and response message
  // counts.
  bool emit_filter_state = 1;

  oneof per_method_stat_specifier {
    // If set, specifies an allowlist of service/methods that will have individual stats
    // emitted for them. Any call that does not match the allowlist will be counted
    // in a stat with no method specifier: `cluster..grpc.*`.
    // NOTE(review): the stat-name placeholder between the dots was lost during extraction.
    config.core.v3.GrpcMethodList individual_method_stats_allowlist = 2;

    // If set to true, emit stats for all service/method names.
    //
    // If set to false, emit stats for all service/message types to the same stats without including
    // the service/method in the name, with prefix `cluster..grpc`. This can be useful if
    // service/method granularity is not needed, or if each cluster only receives a single method.
    //
    // .. attention::
    //   This option is only safe if all clients are trusted. If this option is enabled
    //   with untrusted clients, the clients could cause unbounded growth in the number of stats in
    //   Envoy, using unbounded memory and potentially slowing down stats pipelines.
    //
    // .. attention::
    //   If neither `individual_method_stats_allowlist` nor `stats_for_all_methods` is set, the
    //   behavior will default to `stats_for_all_methods=true`. This default value is deprecated,
    //   and in a future release, if neither field is set, it will default to
    //   `stats_for_all_methods=false` in order to be safe by default. This behavior can be
    //   controlled with runtime override
    //   `envoy.deprecated_features.grpc_stats_filter_enable_stats_for_all_methods_by_default`.
    google.protobuf.BoolValue stats_for_all_methods = 3;
  }

  // If true, the filter will gather a histogram for the request time of the upstream.
  // It works with :ref:`stats_for_all_methods
  // `
  // and :ref:`individual_method_stats_allowlist
  // ` the same way
  // request_message_count and response_message_count works.
  bool enable_upstream_stats = 4;
}

// gRPC statistics filter state object in protobuf form.
message FilterObject {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.grpc_stats.v2alpha.FilterObject";

  // Count of request messages in the request stream.
  uint64 request_message_count = 1;

  // Count of response messages in the response stream.
  uint64 response_message_count = 2;
}



================================================
FILE: api/envoy/extensions/filters/http/grpc_web/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/grpc_web/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/extensions/filters/http/grpc_web/v3/grpc_web.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.grpc_web.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.grpc_web.v3";
option java_outer_classname = "GrpcWebProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: gRPC Web]
// gRPC Web :ref:`configuration overview `.
// [#extension: envoy.filters.http.grpc_web]

// gRPC Web filter config.
message GrpcWeb {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.grpc_web.v2.GrpcWeb";
}



================================================
FILE: api/envoy/extensions/filters/http/gzip/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/gzip/v2:pkg",
        "//envoy/extensions/filters/http/compressor/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/extensions/filters/http/gzip/v3/gzip.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.gzip.v3;

import "envoy/extensions/filters/http/compressor/v3/compressor.proto";

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.gzip.v3";
option java_outer_classname = "GzipProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Gzip]
// Gzip :ref:`configuration overview `.
// [#extension: envoy.filters.http.gzip] // [#next-free-field: 12] message Gzip { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.gzip.v2.Gzip"; enum CompressionStrategy { DEFAULT = 0; FILTERED = 1; HUFFMAN = 2; RLE = 3; } message CompressionLevel { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.gzip.v2.Gzip.CompressionLevel"; enum Enum { DEFAULT = 0; BEST = 1; SPEED = 2; } } reserved 2, 6, 7, 8; reserved "content_length", "content_type", "disable_on_etag_header", "remove_accept_encoding_header"; // Value from 1 to 9 that controls the amount of internal memory used by zlib. Higher values // use more memory, but are faster and produce better compression results. The default value is 5. google.protobuf.UInt32Value memory_level = 1 [(validate.rules).uint32 = {lte: 9 gte: 1}]; // A value used for selecting the zlib compression level. This setting will affect speed and // amount of compression applied to the content. "BEST" provides higher compression at the cost of // higher latency, "SPEED" provides lower compression with minimum impact on response time. // "DEFAULT" provides an optimal result between speed and compression. This field will be set to // "DEFAULT" if not specified. CompressionLevel.Enum compression_level = 3 [(validate.rules).enum = {defined_only: true}]; // A value used for selecting the zlib compression strategy which is directly related to the // characteristics of the content. Most of the time "DEFAULT" will be the best choice, though // there are situations which changing this parameter might produce better results. For example, // run-length encoding (RLE) is typically used when the content is known for having sequences // which same data occurs many consecutive times. For more information about each strategy, please // refer to zlib manual. 
CompressionStrategy compression_strategy = 4 [(validate.rules).enum = {defined_only: true}]; // Value from 9 to 15 that represents the base two logarithmic of the compressor's window size. // Larger window results in better compression at the expense of memory usage. The default is 12 // which will produce a 4096 bytes window. For more details about this parameter, please refer to // zlib manual > deflateInit2. google.protobuf.UInt32Value window_bits = 9 [(validate.rules).uint32 = {lte: 15 gte: 9}]; // Set of configuration parameters common for all compression filters. If this field is set then // the fields `content_length`, `content_type`, `disable_on_etag_header` and // `remove_accept_encoding_header` are ignored. compressor.v3.Compressor compressor = 10; // Value for Zlib's next output buffer. If not set, defaults to 4096. // See https://www.zlib.net/manual.html for more details. Also see // https://github.com/envoyproxy/envoy/issues/8448 for context on this filter's performance. google.protobuf.UInt32Value chunk_size = 11 [(validate.rules).uint32 = {lte: 65536 gte: 4096}]; } ================================================ FILE: api/envoy/extensions/filters/http/header_to_metadata/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/header_to_metadata/v2:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/header_to_metadata/v3/header_to_metadata.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.header_to_metadata.v3; import "envoy/type/matcher/v3/regex.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v3"; option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Header-To-Metadata Filter] // // The configuration for transforming headers into metadata. This is useful // for matching load balancer subsets, logging, etc. // // Header to Metadata :ref:`configuration overview `. // [#extension: envoy.filters.http.header_to_metadata] message Config { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config"; enum ValueType { STRING = 0; NUMBER = 1; // The value is a serialized `protobuf.Value // `_. PROTOBUF_VALUE = 2; } // ValueEncode defines the encoding algorithm. enum ValueEncode { // The value is not encoded. NONE = 0; // The value is encoded in `Base64 `_. // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the // non-ASCII characters in the header. 
BASE64 = 1; } // [#next-free-field: 7] message KeyValuePair { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.KeyValuePair"; // The namespace — if this is empty, the filter's namespace will be used. string metadata_namespace = 1; // The key to use within the namespace. string key = 2 [(validate.rules).string = {min_len: 1}]; // The value to pair with the given key. // // When used for a // :ref:`on_header_present ` // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. // // When used for a :ref:`on_header_missing ` // case, a non-empty value must be provided otherwise no metadata is added. string value = 3 [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value // is used as-is. // // This is only used for :ref:`on_header_present `. // // Note: if the `value` field is non-empty this field should be empty. type.matcher.v3.RegexMatchAndSubstitute regex_value_rewrite = 6 [(udpa.annotations.field_migrate).oneof_promotion = "value_type"]; // The value's type — defaults to string. ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. ValueEncode encode = 5; } // A Rule defines what metadata to apply when a header is present or missing. // [#next-free-field: 6] message Rule { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.header_to_metadata.v2.Config.Rule"; // Specifies that a match will be performed on the value of a header or a cookie. // // The header to be extracted. 
string header = 1 [ (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" ]; // The cookie to be extracted. string cookie = 5 [ (validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}, (udpa.annotations.field_migrate).oneof_promotion = "header_cookie_specifier" ]; // If the header or cookie is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead // of the header or cookie value. KeyValuePair on_header_present = 2 [(udpa.annotations.field_migrate).rename = "on_present"]; // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu // of the missing header or cookie value. KeyValuePair on_header_missing = 3 [(udpa.annotations.field_migrate).rename = "on_missing"]; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. // This field is not supported in case of a cookie. bool remove = 4; } // The list of rules to apply to requests. repeated Rule request_rules = 1; // The list of rules to apply to responses. repeated Rule response_rules = 2; } ================================================ FILE: api/envoy/extensions/filters/http/header_to_metadata/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/header_to_metadata/v4alpha/header_to_metadata.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.header_to_metadata.v4alpha; import "envoy/type/matcher/v4alpha/regex.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.header_to_metadata.v4alpha"; option java_outer_classname = "HeaderToMetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Header-To-Metadata Filter] // // The configuration for transforming headers into metadata. This is useful // for matching load balancer subsets, logging, etc. // // Header to Metadata :ref:`configuration overview `. // [#extension: envoy.filters.http.header_to_metadata] message Config { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.header_to_metadata.v3.Config"; enum ValueType { STRING = 0; NUMBER = 1; // The value is a serialized `protobuf.Value // `_. PROTOBUF_VALUE = 2; } // ValueEncode defines the encoding algorithm. enum ValueEncode { // The value is not encoded. NONE = 0; // The value is encoded in `Base64 `_. // Note: this is mostly used for STRING and PROTOBUF_VALUE to escape the // non-ASCII characters in the header. 
BASE64 = 1; } // [#next-free-field: 7] message KeyValuePair { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.header_to_metadata.v3.Config.KeyValuePair"; // The namespace — if this is empty, the filter's namespace will be used. string metadata_namespace = 1; // The key to use within the namespace. string key = 2 [(validate.rules).string = {min_len: 1}]; oneof value_type { // The value to pair with the given key. // // When used for a // :ref:`on_header_present ` // case, if value is non-empty it'll be used instead of the header value. If both are empty, no metadata is added. // // When used for a :ref:`on_header_missing ` // case, a non-empty value must be provided otherwise no metadata is added. string value = 3; // If present, the header's value will be matched and substituted with this. If there is no match or substitution, the header value // is used as-is. // // This is only used for :ref:`on_header_present `. // // Note: if the `value` field is non-empty this field should be empty. type.matcher.v4alpha.RegexMatchAndSubstitute regex_value_rewrite = 6; } // The value's type — defaults to string. ValueType type = 4 [(validate.rules).enum = {defined_only: true}]; // How is the value encoded, default is NONE (not encoded). // The value will be decoded accordingly before storing to metadata. ValueEncode encode = 5; } // A Rule defines what metadata to apply when a header is present or missing. // [#next-free-field: 6] message Rule { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.header_to_metadata.v3.Config.Rule"; oneof header_cookie_specifier { // Specifies that a match will be performed on the value of a header or a cookie. // // The header to be extracted. string header = 1 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; // The cookie to be extracted. 
string cookie = 5 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; } // If the header or cookie is present, apply this metadata KeyValuePair. // // If the value in the KeyValuePair is non-empty, it'll be used instead // of the header or cookie value. KeyValuePair on_present = 2; // If the header or cookie is not present, apply this metadata KeyValuePair. // // The value in the KeyValuePair must be set, since it'll be used in lieu // of the missing header or cookie value. KeyValuePair on_missing = 3; // Whether or not to remove the header after a rule is applied. // // This prevents headers from leaking. // This field is not supported in case of a cookie. bool remove = 4; } // The list of rules to apply to requests. repeated Rule request_rules = 1; // The list of rules to apply to responses. repeated Rule response_rules = 2; } ================================================ FILE: api/envoy/extensions/filters/http/health_check/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/http/health_check/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/health_check/v3/health_check.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.health_check.v3; import "envoy/config/route/v3/route_components.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v3"; option java_outer_classname = "HealthCheckProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Health check] // Health check :ref:`configuration overview `. // [#extension: envoy.filters.http.health_check] // [#next-free-field: 6] message HealthCheck { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.health_check.v2.HealthCheck"; reserved 2; // Specifies whether the filter operates in pass through mode or not. google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}]; // If operating in pass through mode, the amount of time in milliseconds // that the filter should cache the upstream response. google.protobuf.Duration cache_time = 3; // If operating in non-pass-through mode, specifies a set of upstream cluster // names and the minimum percentage of servers in each of those clusters that // must be healthy or degraded in order for the filter to return a 200. // // .. 
note::
//
//   This value is interpreted as an integer by truncating, so 12.50% will be calculated
//   as if it were 12%.
  // Extraction damage repaired: the map's type parameters were stripped; the
  // file imports envoy/type/v3/percent.proto solely for this field's value type.
  map<string, type.v3.Percent> cluster_min_healthy_percentages = 4;

  // Specifies a set of health check request headers to match on. The health check filter will
  // check a request’s headers against all the specified headers. To specify the health check
  // endpoint, set the ``:path`` header to match on.
  repeated config.route.v3.HeaderMatcher headers = 5;
}

================================================
FILE: api/envoy/extensions/filters/http/health_check/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/route/v4alpha:pkg",
        "//envoy/extensions/filters/http/health_check/v3:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/health_check/v4alpha/health_check.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.health_check.v4alpha;

import "envoy/config/route/v4alpha/route_components.proto";
import "envoy/type/v3/percent.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.health_check.v4alpha";
option java_outer_classname = "HealthCheckProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Health check]
// Health check :ref:`configuration overview <config_http_filters_health_check>`.
// [#extension: envoy.filters.http.health_check]
// [#next-free-field: 6]
message HealthCheck {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.health_check.v3.HealthCheck";

  reserved 2;

  // Specifies whether the filter operates in pass through mode or not.
  google.protobuf.BoolValue pass_through_mode = 1 [(validate.rules).message = {required: true}];

  // If operating in pass through mode, the amount of time in milliseconds
  // that the filter should cache the upstream response.
  google.protobuf.Duration cache_time = 3;

  // If operating in non-pass-through mode, specifies a set of upstream cluster
  // names and the minimum percentage of servers in each of those clusters that
  // must be healthy or degraded in order for the filter to return a 200.
  //
  // .. note::
  //
  //    This value is interpreted as an integer by truncating, so 12.50% will be calculated
  //    as if it were 12%.
  //
  // Extraction damage repaired: the map's type parameters were stripped; the
  // file imports envoy/type/v3/percent.proto solely for this field's value type.
  map<string, type.v3.Percent> cluster_min_healthy_percentages = 4;

  // Specifies a set of health check request headers to match on. The health check filter will
  // check a request’s headers against all the specified headers. To specify the health check
  // endpoint, set the ``:path`` header to match on.
  repeated config.route.v4alpha.HeaderMatcher headers = 5;
}

================================================
FILE: api/envoy/extensions/filters/http/ip_tagging/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/ip_tagging/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/ip_tagging/v3/ip_tagging.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.ip_tagging.v3; import "envoy/config/core/v3/address.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.ip_tagging.v3"; option java_outer_classname = "IpTaggingProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: IP tagging] // IP tagging :ref:`configuration overview `. // [#extension: envoy.filters.http.ip_tagging] message IPTagging { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ip_tagging.v2.IPTagging"; // The type of requests the filter should apply to. The supported types // are internal, external or both. The // :ref:`x-forwarded-for` header is // used to determine if a request is internal and will result in // :ref:`x-envoy-internal` // being set. The filter defaults to both, and it will apply to all request types. enum RequestType { // Both external and internal requests will be tagged. This is the default value. BOTH = 0; // Only internal requests will be tagged. INTERNAL = 1; // Only external requests will be tagged. EXTERNAL = 2; } // Supplies the IP tag name and the IP address subnets. message IPTag { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.ip_tagging.v2.IPTagging.IPTag"; // Specifies the IP tag name to apply. 
string ip_tag_name = 1; // A list of IP address subnets that will be tagged with // ip_tag_name. Both IPv4 and IPv6 are supported. repeated config.core.v3.CidrRange ip_list = 2; } // The type of request the filter should apply to. RequestType request_type = 1 [(validate.rules).enum = {defined_only: true}]; // [#comment:TODO(ccaraman): Extend functionality to load IP tags from file system. // Tracked by issue https://github.com/envoyproxy/envoy/issues/2695] // The set of IP tags for the filter. repeated IPTag ip_tags = 4 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/extensions/filters/http/jwt_authn/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/http/jwt_authn/v2alpha:pkg", "//envoy/config/route/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/jwt_authn/v3/README.md ================================================ # JWT Authentication HTTP filter config ## Overview 1. The proto file in this folder defines an HTTP filter config for "jwt_authn" filter. 2. This filter will verify the JWT in the HTTP request as: - The signature should be valid - JWT should not be expired - Issuer and audiences are valid and specified in the filter config. 3. [JWK](https://tools.ietf.org/html/rfc7517#appendix-A) is needed to verify JWT signature. It can be fetched from a remote server or read from a local file. If the JWKS is fetched remotely, it will be cached by the filter. 3. If a JWT is valid, the user is authenticated and the request will be forwarded to the backend server. If a JWT is not valid, the request will be rejected with an error message. 
## The locations to extract JWT

JWT will be extracted from the HTTP headers or query parameters. The default location is the HTTP
header:

```
Authorization: Bearer <token>
```

The next default location is in the query parameter as:

```
?access_token=<token>
```

If a custom location is desired, `from_headers` or `from_params` can be used to specify custom
locations to extract JWT.

## HTTP header to pass successfully verified JWT

If a JWT is valid, its payload will be passed to the backend in a new HTTP header specified in the
`forward_payload_header` field. Its value is the base64url-encoded JWT payload in JSON.

## Further header options

In addition to the `name` field, which specifies the HTTP header name, the `from_headers` section
can specify an optional `value_prefix` value, as in:

```yaml
from_headers:
  - name: bespoke
    value_prefix: jwt_value
```

The above will cause the jwt_authn filter to look for the JWT in the `bespoke` header, following
the tag `jwt_value`. Any non-JWT characters (i.e., anything _other than_ alphanumerics, `_`, `-`,
and `.`) will be skipped, and all following, contiguous, JWT-legal chars will be taken as the JWT.
This means all of the following will return a JWT of `eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk`:

```text
bespoke: jwt_value=eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk
bespoke: {"jwt_value": "eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk"}
bespoke: beta:true,jwt_value:"eyJFbnZveSI6ICJyb2NrcyJ9.e30.c2lnbmVk",trace=1234
```

The header `name` may be `Authorization`.

The `value_prefix` must match exactly, i.e., case-sensitively. If the `value_prefix` is not found,
the header is skipped: not considered as a source for a JWT token.

If there are no JWT-legal characters after the `value_prefix`, the entire string after it is taken
to be the JWT token. This is unlikely to succeed; the error will be reported by the JWT parser.
================================================ FILE: api/envoy/extensions/filters/http/jwt_authn/v3/config.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.jwt_authn.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/http_uri.proto"; import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/empty.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: JWT Authentication] // JWT Authentication :ref:`configuration overview `. // [#extension: envoy.filters.http.jwt_authn] // Please see following for JWT authentication flow: // // * `JSON Web Token (JWT) `_ // * `The OAuth 2.0 Authorization Framework `_ // * `OpenID Connect `_ // // A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies: // // * issuer: the principal that issues the JWT. It has to match the one from the token. // * allowed audiences: the ones in the token have to be listed here. // * how to fetch public key JWKS to verify the token signature. // * how to extract JWT token in the request. // * how to pass successfully verified token payload. // // Example: // // .. 
code-block:: yaml // // issuer: https://example.com // audiences: // - bookstore_android.apps.googleusercontent.com // - bookstore_web.apps.googleusercontent.com // remote_jwks: // http_uri: // uri: https://example.com/.well-known/jwks.json // cluster: example_jwks_cluster // cache_duration: // seconds: 300 // // [#next-free-field: 10] message JwtProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.jwt_authn.v2alpha.JwtProvider"; // Specify the `principal `_ that issued // the JWT, usually a URL or an email address. // // Example: https://securetoken.google.com // Example: 1234567-compute@developer.gserviceaccount.com // string issuer = 1 [(validate.rules).string = {min_len: 1}]; // The list of JWT `audiences `_ are // allowed to access. A JWT containing any of these audiences will be accepted. If not specified, // will not check audiences in the token. // // Example: // // .. code-block:: yaml // // audiences: // - bookstore_android.apps.googleusercontent.com // - bookstore_web.apps.googleusercontent.com // repeated string audiences = 2; // `JSON Web Key Set (JWKS) `_ is needed to // validate signature of a JWT. This field specifies where to fetch JWKS. oneof jwks_source_specifier { option (validate.required) = true; // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP // URI and how the fetched JWKS should be cached. // // Example: // // .. code-block:: yaml // // remote_jwks: // http_uri: // uri: https://www.googleapis.com/oauth2/v1/certs // cluster: jwt.www.googleapis.com|443 // cache_duration: // seconds: 300 // RemoteJwks remote_jwks = 3; // JWKS is in local data source. It could be either in a local file or embedded in the // inline_string. // // Example: local file // // .. code-block:: yaml // // local_jwks: // filename: /etc/envoy/jwks/jwks1.txt // // Example: inline_string // // .. 
code-block:: yaml // // local_jwks: // inline_string: ACADADADADA // config.core.v3.DataSource local_jwks = 4; } // If false, the JWT is removed in the request after a success verification. If true, the JWT is // not removed in the request. Default value is false. bool forward = 5; // Two fields below define where to extract the JWT from an HTTP request. // // If no explicit location is specified, the following default locations are tried in order: // // 1. The Authorization header using the `Bearer schema // `_. Example:: // // Authorization: Bearer . // // 2. `access_token `_ query parameter. // // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations // its provider specified or from the default locations. // // Specify the HTTP headers to extract JWT token. For examples, following config: // // .. code-block:: yaml // // from_headers: // - name: x-goog-iap-jwt-assertion // // can be used to extract token from header:: // // ``x-goog-iap-jwt-assertion: ``. // repeated JwtHeader from_headers = 6; // JWT is sent in a query parameter. `jwt_params` represents the query parameter names. // // For example, if config is: // // .. code-block:: yaml // // from_params: // - jwt_token // // The JWT format in query parameter is:: // // /path?jwt_token= // repeated string from_params = 7; // This field specifies the header name to forward a successfully verified JWT payload to the // backend. The forwarded data is:: // // base64url_encoded(jwt_payload_in_JSON) // // If it is not specified, the payload will not be forwarded. string forward_payload_header = 8 [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}]; // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn** // The value is the *protobuf::Struct*. 
// The value of this field will be the key for its *fields*
  // and the value is the *protobuf::Struct* converted from JWT JSON payload.
  //
  // For example, if payload_in_metadata is *my_payload*:
  //
  // .. code-block:: yaml
  //
  //   envoy.filters.http.jwt_authn:
  //     my_payload:
  //       iss: https://example.com
  //       sub: test@example.com
  //       aud: https://example.com
  //       exp: 1501281058
  //
  string payload_in_metadata = 9;
}

// This message specifies how to fetch JWKS from remote and how to cache it.
message RemoteJwks {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.RemoteJwks";

  // The HTTP URI to fetch the JWKS. For example:
  //
  // .. code-block:: yaml
  //
  //    http_uri:
  //      uri: https://www.googleapis.com/oauth2/v1/certs
  //      cluster: jwt.www.googleapis.com|443
  //
  config.core.v3.HttpUri http_uri = 1;

  // Duration after which the cached JWKS should be expired. If not specified, default cache
  // duration is 5 minutes.
  google.protobuf.Duration cache_duration = 2;
}

// This message specifies a header location to extract JWT token.
message JwtHeader {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.JwtHeader";

  // The HTTP header name.
  string name = 1
      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

  // The value prefix. The value format is "value_prefix<token>".
  // For example, for "Authorization: Bearer <token>", value_prefix="Bearer " with a space at the
  // end.
  string value_prefix = 2
      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
}

// Specify a required provider with audiences.
message ProviderWithAudiences {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.ProviderWithAudiences";

  // Specify a required provider name.
  string provider_name = 1;

  // This field overrides the one specified in the JwtProvider.
  repeated string audiences = 2;
}

// This message specifies a Jwt requirement. An empty message means JWT verification is not
// required. Here are some config examples:
//
// .. code-block:: yaml
//
//  # Example 1: not required with an empty message
//
//  # Example 2: require A
//  provider_name: provider-A
//
//  # Example 3: require A or B
//  requires_any:
//    requirements:
//      - provider_name: provider-A
//      - provider_name: provider-B
//
//  # Example 4: require A and B
//  requires_all:
//    requirements:
//      - provider_name: provider-A
//      - provider_name: provider-B
//
//  # Example 5: require A and (B or C)
//  requires_all:
//    requirements:
//      - provider_name: provider-A
//      - requires_any:
//          requirements:
//            - provider_name: provider-B
//            - provider_name: provider-C
//
//  # Example 6: require A or (B and C)
//  requires_any:
//    requirements:
//      - provider_name: provider-A
//      - requires_all:
//          requirements:
//            - provider_name: provider-B
//            - provider_name: provider-C
//
//  # Example 7: A is optional (if token from A is provided, it must be valid, but also allows
//  # missing token.)
//  requires_any:
//    requirements:
//      - provider_name: provider-A
//      - allow_missing: {}
//
//  # Example 8: A is optional and B is required.
//  requires_all:
//    requirements:
//      - requires_any:
//          requirements:
//            - provider_name: provider-A
//            - allow_missing: {}
//      - provider_name: provider-B
//
// [#next-free-field: 7]
message JwtRequirement {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirement";

  oneof requires_type {
    // Specify a required provider name.
    string provider_name = 1;

    // Specify a required provider with audiences.
    ProviderWithAudiences provider_and_audiences = 2;

    // Specify list of JwtRequirement. Their results are OR-ed.
    // If any one of them passes, the result is passed.
    JwtRequirementOrList requires_any = 3;

    // Specify list of JwtRequirement. Their results are AND-ed.
    // All of them must pass; if one of them fails or is missing, it fails.
    JwtRequirementAndList requires_all = 4;

    // The requirement is always satisfied even if JWT is missing or the JWT
    // verification fails. A typical usage is: this filter is used to only verify
    // JWTs and pass the verified JWT payloads to another filter, the other filter
    // will make decision. In this mode, all JWT tokens will be verified.
    google.protobuf.Empty allow_missing_or_failed = 5;

    // The requirement is satisfied if JWT is missing, but failed if JWT is
    // presented but invalid. Similar to allow_missing_or_failed, this is used
    // to only verify JWTs and pass the verified payload to another filter. The
    // difference is that this mode will reject requests with invalid tokens.
    google.protobuf.Empty allow_missing = 6;
  }
}

// This message specifies a list of RequiredProvider.
// Their results are OR-ed; if any one of them passes, the result is passed.
message JwtRequirementOrList {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementOrList";

  // Specify a list of JwtRequirement.
  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];
}

// This message specifies a list of RequiredProvider.
// Their results are AND-ed; all of them must pass, if one of them fails or is missing, it fails.
message JwtRequirementAndList {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.JwtRequirementAndList";

  // Specify a list of JwtRequirement.
  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];
}

// This message specifies a Jwt requirement for a specific Route condition.
// Example 1:
//
// .. code-block:: yaml
//
//    - match:
//        prefix: /healthz
//
// In above example, "requires" field is empty for /healthz prefix match,
// it means that requests matching the path prefix don't require JWT authentication.
//
// Example 2:
//
// .. code-block:: yaml
//
//    - match:
//        prefix: /
//      requires: { provider_name: provider-A }
//
// In above example, all requests matched the path prefix require jwt authentication
// from "provider-A".
message RequirementRule {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.RequirementRule";

  // The route matching parameter. Only when the match is satisfied, the "requires" field will
  // apply.
  //
  // For example: following match will match all requests.
  //
  // .. code-block:: yaml
  //
  //    match:
  //      prefix: /
  //
  config.route.v3.RouteMatch match = 1 [(validate.rules).message = {required: true}];

  // Specify a Jwt requirement. Please see the detailed comment in message JwtRequirement.
  JwtRequirement requires = 2;
}

// This message specifies Jwt requirements based on stream_info.filterState.
// This FilterState should use `Router::StringAccessor` object to set a string value.
// Other HTTP filters can use it to specify Jwt requirements dynamically.
//
// Example:
//
// .. code-block:: yaml
//
//    name: jwt_selector
//    requires:
//      issuer_1:
//        provider_name: issuer1
//      issuer_2:
//        provider_name: issuer2
//
// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request,
// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify.
message FilterStateRule {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.FilterStateRule";

  // The filter state name to retrieve the `Router::StringAccessor` object.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // A map of string keys to requirements. The string key is the string value
  // in the FilterState with the name specified in the *name* field above.
  map<string, JwtRequirement> requires = 3;
}

// This is the Envoy HTTP filter config for JWT authentication.
//
// For example:
//
// .. code-block:: yaml
//
//   providers:
//      provider1:
//        issuer: issuer1
//        audiences:
//        - audience1
//        - audience2
//        remote_jwks:
//          http_uri:
//            uri: https://example.com/.well-known/jwks.json
//            cluster: example_jwks_cluster
//      provider2:
//        issuer: issuer2
//        local_jwks:
//          inline_string: jwks_string
//
//   rules:
//      # No jwt verification is required for /health path
//      - match:
//          prefix: /health
//
//      # Jwt verification for provider1 is required for path prefixed with "prefix"
//      - match:
//          prefix: /prefix
//        requires:
//          provider_name: provider1
//
//      # Jwt verification for either provider1 or provider2 is required for all other requests.
//      - match:
//          prefix: /
//        requires:
//          requires_any:
//            requirements:
//              - provider_name: provider1
//              - provider_name: provider2
//
message JwtAuthentication {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.jwt_authn.v2alpha.JwtAuthentication";

  // Map of provider names to JwtProviders.
  //
  // .. code-block:: yaml
  //
  //   providers:
  //     provider1:
  //        issuer: issuer1
  //        audiences:
  //        - audience1
  //        - audience2
  //        remote_jwks:
  //          http_uri:
  //            uri: https://example.com/.well-known/jwks.json
  //            cluster: example_jwks_cluster
  //     provider2:
  //        issuer: provider2
  //        local_jwks:
  //          inline_string: jwks_string
  //
  map<string, JwtProvider> providers = 1;

  // Specifies requirements based on the route matches. The first matched requirement will be
  // applied. If there are overlapped match conditions, please put the most specific match first.
  //
  // Examples
  //
  // .. code-block:: yaml
  //
  //   rules:
  //     - match:
  //         prefix: /healthz
  //     - match:
  //         prefix: /baz
  //       requires:
  //         provider_name: provider1
  //     - match:
  //         prefix: /foo
  //       requires:
  //         requires_any:
  //           requirements:
  //             - provider_name: provider1
  //             - provider_name: provider2
  //     - match:
  //         prefix: /bar
  //       requires:
  //         requires_all:
  //           requirements:
  //             - provider_name: provider1
  //             - provider_name: provider2
  //
  repeated RequirementRule rules = 2;

  // This message specifies Jwt requirements based on stream_info.filterState.
  // Other HTTP filters can use it to specify Jwt requirements dynamically.
  // The *rules* field above is checked first; if it could not find any matches,
  // check this one.
  FilterStateRule filter_state_rules = 3;

  // When set to true, bypass the `CORS preflight request
  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT
  // requirements specified in the rules.
  bool bypass_cors_preflight = 4;
}



================================================
FILE: api/envoy/extensions/filters/http/jwt_authn/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v4alpha:pkg",
        "//envoy/config/route/v4alpha:pkg",
        "//envoy/extensions/filters/http/jwt_authn/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/extensions/filters/http/jwt_authn/v4alpha/config.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.jwt_authn.v4alpha;

import "envoy/config/core/v4alpha/base.proto";
import "envoy/config/core/v4alpha/http_uri.proto";
import "envoy/config/route/v4alpha/route_components.proto";

import "google/protobuf/duration.proto";
import "google/protobuf/empty.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.jwt_authn.v4alpha";
option java_outer_classname = "ConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: JWT Authentication]
// JWT Authentication :ref:`configuration overview <config_http_filters_jwt_authn>`.
// [#extension: envoy.filters.http.jwt_authn]

// Please see following for JWT authentication flow:
//
// * `JSON Web Token (JWT) <https://tools.ietf.org/html/rfc7519>`_
// * `The OAuth 2.0 Authorization Framework <https://tools.ietf.org/html/rfc6749>`_
// * `OpenID Connect <http://openid.net/connect>`_
//
// A JwtProvider message specifies how a JSON Web Token (JWT) can be verified. It specifies:
//
// * issuer: the principal that issues the JWT. It has to match the one from the token.
// * allowed audiences: the ones in the token have to be listed here.
// * how to fetch public key JWKS to verify the token signature.
// * how to extract JWT token in the request.
// * how to pass successfully verified token payload.
//
// Example:
//
// .. code-block:: yaml
//
//     issuer: https://example.com
//     audiences:
//     - bookstore_android.apps.googleusercontent.com
//     - bookstore_web.apps.googleusercontent.com
//     remote_jwks:
//       http_uri:
//         uri: https://example.com/.well-known/jwks.json
//         cluster: example_jwks_cluster
//       cache_duration:
//         seconds: 300
//
// [#next-free-field: 10]
message JwtProvider {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.JwtProvider";

  // Specify the `principal <https://tools.ietf.org/html/rfc7519#section-4.1.1>`_ that issued
  // the JWT, usually a URL or an email address.
  //
  // Example: https://securetoken.google.com
  // Example: 1234567-compute@developer.gserviceaccount.com
  //
  string issuer = 1 [(validate.rules).string = {min_len: 1}];

  // The list of JWT `audiences <https://tools.ietf.org/html/rfc7519#section-4.1.3>`_ are
  // allowed to access. A JWT containing any of these audiences will be accepted. If not specified,
  // will not check audiences in the token.
  //
  // Example:
  //
  // .. code-block:: yaml
  //
  //     audiences:
  //     - bookstore_android.apps.googleusercontent.com
  //     - bookstore_web.apps.googleusercontent.com
  //
  repeated string audiences = 2;

  // `JSON Web Key Set (JWKS) <https://tools.ietf.org/html/rfc7517#appendix-A>`_ is needed to
  // validate signature of a JWT. This field specifies where to fetch JWKS.
  oneof jwks_source_specifier {
    option (validate.required) = true;

    // JWKS can be fetched from remote server via HTTP/HTTPS. This field specifies the remote HTTP
    // URI and how the fetched JWKS should be cached.
    //
    // Example:
    //
    // .. code-block:: yaml
    //
    //    remote_jwks:
    //      http_uri:
    //        uri: https://www.googleapis.com/oauth2/v1/certs
    //        cluster: jwt.www.googleapis.com|443
    //      cache_duration:
    //        seconds: 300
    //
    RemoteJwks remote_jwks = 3;

    // JWKS is in local data source. It could be either in a local file or embedded in the
    // inline_string.
    //
    // Example: local file
    //
    // .. code-block:: yaml
    //
    //    local_jwks:
    //      filename: /etc/envoy/jwks/jwks1.txt
    //
    // Example: inline_string
    //
    // .. code-block:: yaml
    //
    //    local_jwks:
    //      inline_string: ACADADADADA
    //
    config.core.v4alpha.DataSource local_jwks = 4;
  }

  // If false, the JWT is removed in the request after a success verification. If true, the JWT is
  // not removed in the request. Default value is false.
  bool forward = 5;

  // Two fields below define where to extract the JWT from an HTTP request.
  //
  // If no explicit location is specified, the following default locations are tried in order:
  //
  // 1. The Authorization header using the `Bearer schema
  // <https://tools.ietf.org/html/rfc6750#section-2.1>`_. Example::
  //
  //    Authorization: Bearer <token>.
  //
  // 2. `access_token <https://tools.ietf.org/html/rfc6750#section-2.3>`_ query parameter.
  //
  // Multiple JWTs can be verified for a request. Each JWT has to be extracted from the locations
  // its provider specified or from the default locations.
  //
  // Specify the HTTP headers to extract JWT token. For examples, following config:
  //
  // .. code-block:: yaml
  //
  //   from_headers:
  //   - name: x-goog-iap-jwt-assertion
  //
  // can be used to extract token from header::
  //
  //   ``x-goog-iap-jwt-assertion: <JWT>``.
  //
  repeated JwtHeader from_headers = 6;

  // JWT is sent in a query parameter. `jwt_params` represents the query parameter names.
  //
  // For example, if config is:
  //
  // .. code-block:: yaml
  //
  //   from_params:
  //   - jwt_token
  //
  // The JWT format in query parameter is::
  //
  //    /path?jwt_token=<JWT>
  //
  repeated string from_params = 7;

  // This field specifies the header name to forward a successfully verified JWT payload to the
  // backend. The forwarded data is::
  //
  //    base64url_encoded(jwt_payload_in_JSON)
  //
  // If it is not specified, the payload will not be forwarded.
  string forward_payload_header = 8
      [(validate.rules).string = {well_known_regex: HTTP_HEADER_NAME strict: false}];

  // If non empty, successfully verified JWT payloads will be written to StreamInfo DynamicMetadata
  // in the format as: *namespace* is the jwt_authn filter name as **envoy.filters.http.jwt_authn**
  // The value is the *protobuf::Struct*. The value of this field will be the key for its *fields*
  // and the value is the *protobuf::Struct* converted from JWT JSON payload.
  //
  // For example, if payload_in_metadata is *my_payload*:
  //
  // .. code-block:: yaml
  //
  //   envoy.filters.http.jwt_authn:
  //     my_payload:
  //       iss: https://example.com
  //       sub: test@example.com
  //       aud: https://example.com
  //       exp: 1501281058
  //
  string payload_in_metadata = 9;
}

// This message specifies how to fetch JWKS from remote and how to cache it.
message RemoteJwks {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.RemoteJwks";

  // The HTTP URI to fetch the JWKS. For example:
  //
  // .. code-block:: yaml
  //
  //    http_uri:
  //      uri: https://www.googleapis.com/oauth2/v1/certs
  //      cluster: jwt.www.googleapis.com|443
  //
  config.core.v4alpha.HttpUri http_uri = 1;

  // Duration after which the cached JWKS should be expired. If not specified, default cache
  // duration is 5 minutes.
  google.protobuf.Duration cache_duration = 2;
}

// This message specifies a header location to extract JWT token.
message JwtHeader {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.JwtHeader";

  // The HTTP header name.
  string name = 1
      [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}];

  // The value prefix. The value format is "value_prefix<token>".
  // For example, for "Authorization: Bearer <token>", value_prefix="Bearer " with a space at the
  // end.
  string value_prefix = 2
      [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}];
}

// Specify a required provider with audiences.
message ProviderWithAudiences {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.ProviderWithAudiences";

  // Specify a required provider name.
  string provider_name = 1;

  // This field overrides the one specified in the JwtProvider.
  repeated string audiences = 2;
}

// This message specifies a Jwt requirement. An empty message means JWT verification is not
// required. Here are some config examples:
//
// .. code-block:: yaml
//
//  # Example 1: not required with an empty message
//
//  # Example 2: require A
//  provider_name: provider-A
//
//  # Example 3: require A or B
//  requires_any:
//    requirements:
//      - provider_name: provider-A
//      - provider_name: provider-B
//
//  # Example 4: require A and B
//  requires_all:
//    requirements:
//      - provider_name: provider-A
//      - provider_name: provider-B
//
//  # Example 5: require A and (B or C)
//  requires_all:
//    requirements:
//      - provider_name: provider-A
//      - requires_any:
//          requirements:
//            - provider_name: provider-B
//            - provider_name: provider-C
//
//  # Example 6: require A or (B and C)
//  requires_any:
//    requirements:
//      - provider_name: provider-A
//      - requires_all:
//          requirements:
//            - provider_name: provider-B
//            - provider_name: provider-C
//
//  # Example 7: A is optional (if token from A is provided, it must be valid, but also allows
//  # missing token.)
//  requires_any:
//    requirements:
//      - provider_name: provider-A
//      - allow_missing: {}
//
//  # Example 8: A is optional and B is required.
//  requires_all:
//    requirements:
//      - requires_any:
//          requirements:
//            - provider_name: provider-A
//            - allow_missing: {}
//      - provider_name: provider-B
//
// [#next-free-field: 7]
message JwtRequirement {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirement";

  oneof requires_type {
    // Specify a required provider name.
    string provider_name = 1;

    // Specify a required provider with audiences.
    ProviderWithAudiences provider_and_audiences = 2;

    // Specify list of JwtRequirement. Their results are OR-ed.
    // If any one of them passes, the result is passed.
    JwtRequirementOrList requires_any = 3;

    // Specify list of JwtRequirement. Their results are AND-ed.
    // All of them must pass; if one of them fails or is missing, it fails.
    JwtRequirementAndList requires_all = 4;

    // The requirement is always satisfied even if JWT is missing or the JWT
    // verification fails. A typical usage is: this filter is used to only verify
    // JWTs and pass the verified JWT payloads to another filter, the other filter
    // will make decision. In this mode, all JWT tokens will be verified.
    google.protobuf.Empty allow_missing_or_failed = 5;

    // The requirement is satisfied if JWT is missing, but failed if JWT is
    // presented but invalid. Similar to allow_missing_or_failed, this is used
    // to only verify JWTs and pass the verified payload to another filter. The
    // difference is that this mode will reject requests with invalid tokens.
    google.protobuf.Empty allow_missing = 6;
  }
}

// This message specifies a list of RequiredProvider.
// Their results are OR-ed; if any one of them passes, the result is passed.
message JwtRequirementOrList {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementOrList";

  // Specify a list of JwtRequirement.
  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];
}

// This message specifies a list of RequiredProvider.
// Their results are AND-ed; all of them must pass, if one of them fails or is missing, it fails.
message JwtRequirementAndList {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.JwtRequirementAndList";

  // Specify a list of JwtRequirement.
  repeated JwtRequirement requirements = 1 [(validate.rules).repeated = {min_items: 2}];
}

// This message specifies a Jwt requirement for a specific Route condition.
// Example 1:
//
// .. code-block:: yaml
//
//    - match:
//        prefix: /healthz
//
// In above example, "requires" field is empty for /healthz prefix match,
// it means that requests matching the path prefix don't require JWT authentication.
//
// Example 2:
//
// .. code-block:: yaml
//
//    - match:
//        prefix: /
//      requires: { provider_name: provider-A }
//
// In above example, all requests matched the path prefix require jwt authentication
// from "provider-A".
message RequirementRule {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.RequirementRule";

  // The route matching parameter. Only when the match is satisfied, the "requires" field will
  // apply.
  //
  // For example: following match will match all requests.
  //
  // .. code-block:: yaml
  //
  //    match:
  //      prefix: /
  //
  config.route.v4alpha.RouteMatch match = 1 [(validate.rules).message = {required: true}];

  // Specify a Jwt requirement. Please see the detailed comment in message JwtRequirement.
  JwtRequirement requires = 2;
}

// This message specifies Jwt requirements based on stream_info.filterState.
// This FilterState should use `Router::StringAccessor` object to set a string value.
// Other HTTP filters can use it to specify Jwt requirements dynamically.
//
// Example:
//
// .. code-block:: yaml
//
//    name: jwt_selector
//    requires:
//      issuer_1:
//        provider_name: issuer1
//      issuer_2:
//        provider_name: issuer2
//
// If a filter set "jwt_selector" with "issuer_1" to FilterState for a request,
// jwt_authn filter will use JwtRequirement{"provider_name": "issuer1"} to verify.
message FilterStateRule {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.FilterStateRule";

  // The filter state name to retrieve the `Router::StringAccessor` object.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // A map of string keys to requirements. The string key is the string value
  // in the FilterState with the name specified in the *name* field above.
  map<string, JwtRequirement> requires = 3;
}

// This is the Envoy HTTP filter config for JWT authentication.
//
// For example:
//
// .. code-block:: yaml
//
//   providers:
//      provider1:
//        issuer: issuer1
//        audiences:
//        - audience1
//        - audience2
//        remote_jwks:
//          http_uri:
//            uri: https://example.com/.well-known/jwks.json
//            cluster: example_jwks_cluster
//      provider2:
//        issuer: issuer2
//        local_jwks:
//          inline_string: jwks_string
//
//   rules:
//      # No jwt verification is required for /health path
//      - match:
//          prefix: /health
//
//      # Jwt verification for provider1 is required for path prefixed with "prefix"
//      - match:
//          prefix: /prefix
//        requires:
//          provider_name: provider1
//
//      # Jwt verification for either provider1 or provider2 is required for all other requests.
//      - match:
//          prefix: /
//        requires:
//          requires_any:
//            requirements:
//              - provider_name: provider1
//              - provider_name: provider2
//
message JwtAuthentication {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication";

  // Map of provider names to JwtProviders.
  //
  // .. code-block:: yaml
  //
  //   providers:
  //     provider1:
  //        issuer: issuer1
  //        audiences:
  //        - audience1
  //        - audience2
  //        remote_jwks:
  //          http_uri:
  //            uri: https://example.com/.well-known/jwks.json
  //            cluster: example_jwks_cluster
  //     provider2:
  //        issuer: provider2
  //        local_jwks:
  //          inline_string: jwks_string
  //
  map<string, JwtProvider> providers = 1;

  // Specifies requirements based on the route matches. The first matched requirement will be
  // applied. If there are overlapped match conditions, please put the most specific match first.
  //
  // Examples
  //
  // .. code-block:: yaml
  //
  //   rules:
  //     - match:
  //         prefix: /healthz
  //     - match:
  //         prefix: /baz
  //       requires:
  //         provider_name: provider1
  //     - match:
  //         prefix: /foo
  //       requires:
  //         requires_any:
  //           requirements:
  //             - provider_name: provider1
  //             - provider_name: provider2
  //     - match:
  //         prefix: /bar
  //       requires:
  //         requires_all:
  //           requirements:
  //             - provider_name: provider1
  //             - provider_name: provider2
  //
  repeated RequirementRule rules = 2;

  // This message specifies Jwt requirements based on stream_info.filterState.
  // Other HTTP filters can use it to specify Jwt requirements dynamically.
  // The *rules* field above is checked first; if it could not find any matches,
  // check this one.
  FilterStateRule filter_state_rules = 3;

  // When set to true, bypass the `CORS preflight request
  // <http://www.w3.org/TR/cors/#cross-origin-request-with-preflight>`_ regardless of JWT
  // requirements specified in the rules.
  bool bypass_cors_preflight = 4;
}



================================================
FILE: api/envoy/extensions/filters/http/local_ratelimit/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/extensions/filters/http/local_ratelimit/v3/local_rate_limit.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.local_ratelimit.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/type/v3/http_status.proto";
import "envoy/type/v3/token_bucket.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.local_ratelimit.v3";
option java_outer_classname = "LocalRateLimitProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Local Rate limit]
// Local Rate limit :ref:`configuration overview <config_http_filters_local_rate_limit>`.
// [#extension: envoy.filters.http.local_ratelimit]

// [#next-free-field: 7]
message LocalRateLimit {
  // The human readable prefix to use when emitting stats.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // This field allows for a custom HTTP response status code to the downstream client when
  // the request has been rate limited.
  // Defaults to 429 (TooManyRequests).
  //
  // .. note::
  //   If this is set to < 400, 429 will be used instead.
  type.v3.HttpStatus status = 2;

  // The token bucket configuration to use for rate limiting requests that are processed by this
  // filter. Each request processed by the filter consumes a single token. If the token is
  // available, the request will be allowed. If no tokens are available, the request will receive
  // the configured rate limit status.
  //
  // .. note::
  //   It's fine for the token bucket to be unset for the global configuration since the rate limit
  //   can be applied at a the virtual host or route level. Thus, the token bucket must be set
  //   for the per route configuration otherwise the config will be rejected.
  //
  // .. note::
  //   When using per route configuration, the bucket becomes unique to that route.
  //
  // .. note::
  //   In the current implementation the token bucket's :ref:`fill_interval
  //   <envoy_v3_api_field_type.v3.TokenBucket.fill_interval>` must be >= 50ms to avoid too
  //   aggressive refills.
  type.v3.TokenBucket token_bucket = 3;

  // If set, this will enable -- but not necessarily enforce -- the rate limit for the given
  // fraction of requests.
  // Defaults to 0% of requests for safety.
  config.core.v3.RuntimeFractionalPercent filter_enabled = 4;

  // If set, this will enforce the rate limit decisions for the given fraction of requests.
  //
  // Note: this only applies to the fraction of enabled requests.
  //
  // Defaults to 0% of requests for safety.
  config.core.v3.RuntimeFractionalPercent filter_enforced = 5;

  // Specifies a list of HTTP headers that should be added to each response for requests that
  // have been rate limited.
  repeated config.core.v3.HeaderValueOption response_headers_to_add = 6
      [(validate.rules).repeated = {max_items: 10}];
}



================================================
FILE: api/envoy/extensions/filters/http/lua/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/config/filter/http/lua/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/extensions/filters/http/lua/v3/lua.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.lua.v3;

import "envoy/config/core/v3/base.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.lua.v3";
option java_outer_classname = "LuaProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Lua]
// Lua :ref:`configuration overview <config_http_filters_lua>`.
// [#extension: envoy.filters.http.lua]

message Lua {
  option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.http.lua.v2.Lua";

  // The Lua code that Envoy will execute. This can be a very small script that
  // further loads code from disk if desired. Note that if JSON configuration is used, the code must
  // be properly escaped. YAML configuration may be easier to read since YAML supports multi-line
  // strings so complex scripts can be easily expressed inline in the configuration.
  string inline_code = 1 [(validate.rules).string = {min_len: 1}];

  // Map of named Lua source codes that can be referenced in :ref:`LuaPerRoute
  // <envoy_v3_api_msg_extensions.filters.http.lua.v3.LuaPerRoute>`. The Lua source codes can be
  // loaded from inline string or local files.
  //
  // Example:
  //
  // .. code-block:: yaml
  //
  //   source_codes:
  //     hello.lua:
  //       inline_string: |
  //         function envoy_on_response(response_handle)
  //           -- Do something.
  //         end
  //     world.lua:
  //       filename: /etc/lua/world.lua
  //
  map<string, config.core.v3.DataSource> source_codes = 2;
}

message LuaPerRoute {
  oneof override {
    option (validate.required) = true;

    // Disable the Lua filter for this particular vhost or route. If disabled is specified in
    // multiple per-filter-configs, the most specific one will be used.
    bool disabled = 1 [(validate.rules).bool = {const: true}];

    // A name of a Lua source code stored in
    // :ref:`Lua.source_codes <envoy_v3_api_field_extensions.filters.http.lua.v3.Lua.source_codes>`.
    string name = 2 [(validate.rules).string = {min_len: 1}];

    // A configured per-route Lua source code that can be served by RDS or provided inline.
    config.core.v3.DataSource source_code = 3;
  }
}



================================================
FILE: api/envoy/extensions/filters/http/oauth2/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/config/route/v3:pkg",
        "//envoy/extensions/transport_sockets/tls/v3:pkg",
        "//envoy/type/matcher/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)



================================================
FILE: api/envoy/extensions/filters/http/oauth2/v3alpha/oauth.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.oauth2.v3alpha;

import "envoy/config/core/v3/http_uri.proto";
import "envoy/config/route/v3/route_components.proto";
import "envoy/extensions/transport_sockets/tls/v3/secret.proto";
import "envoy/type/matcher/v3/path.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.oauth2.v3alpha";
option java_outer_classname = "OauthProto";
option java_multiple_files = true;
option
(udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: OAuth] // OAuth :ref:`configuration overview `. // [#extension: envoy.filters.http.oauth2] // message OAuth2Credentials { // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. string client_id = 1 [(validate.rules).string = {min_len: 1}]; // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. transport_sockets.tls.v3.SdsSecretConfig token_secret = 2 [(validate.rules).message = {required: true}]; // Configures how the secret token should be created. oneof token_formation { option (validate.required) = true; // If present, the secret token will be a HMAC using the provided secret. transport_sockets.tls.v3.SdsSecretConfig hmac_secret = 3 [(validate.rules).message = {required: true}]; } } // OAuth config // // [#next-free-field: 9] message OAuth2Config { // Endpoint on the authorization server to retrieve the access token from. config.core.v3.HttpUri token_endpoint = 1; // The endpoint redirect to for authorization in response to unauthorized requests. string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Credentials used for OAuth. OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; // The redirect URI passed to the authorization endpoint. Supports header formatting // tokens. For more information, including details on header value syntax, see the // documentation on :ref:`custom request headers `. // // This URI should not contain any query parameters. string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. 
type.matcher.v3.PathMatcher redirect_path_matcher = 5 [(validate.rules).message = {required: true}]; // The path to sign a user out, clearing their credential cookies. type.matcher.v3.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}]; // Forward the OAuth token as a Bearer to upstream web service. bool forward_bearer_token = 7; // Any request that matches any of the provided matchers will be passed through without OAuth validation. repeated config.route.v3.HeaderMatcher pass_through_matcher = 8; } // Filter config. message OAuth2 { // Leave this empty to disable OAuth2 for a specific route, using per filter config. OAuth2Config config = 1; } ================================================ FILE: api/envoy/extensions/filters/http/oauth2/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/http/oauth2/v4alpha/oauth.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.http.oauth2.v4alpha; import "envoy/config/core/v4alpha/http_uri.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto"; import "envoy/type/matcher/v4alpha/path.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = 
"io.envoyproxy.envoy.extensions.filters.http.oauth2.v4alpha"; option java_outer_classname = "OauthProto"; option java_multiple_files = true; option (udpa.annotations.file_status).work_in_progress = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: OAuth] // OAuth :ref:`configuration overview `. // [#extension: envoy.filters.http.oauth2] // message OAuth2Credentials { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Credentials"; // The client_id to be used in the authorize calls. This value will be URL encoded when sent to the OAuth server. string client_id = 1 [(validate.rules).string = {min_len: 1}]; // The secret used to retrieve the access token. This value will be URL encoded when sent to the OAuth server. transport_sockets.tls.v4alpha.SdsSecretConfig token_secret = 2 [(validate.rules).message = {required: true}]; // Configures how the secret token should be created. oneof token_formation { option (validate.required) = true; // If present, the secret token will be a HMAC using the provided secret. transport_sockets.tls.v4alpha.SdsSecretConfig hmac_secret = 3 [(validate.rules).message = {required: true}]; } } // OAuth config // // [#next-free-field: 9] message OAuth2Config { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2Config"; // Endpoint on the authorization server to retrieve the access token from. config.core.v4alpha.HttpUri token_endpoint = 1; // The endpoint redirect to for authorization in response to unauthorized requests. string authorization_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Credentials used for OAuth. OAuth2Credentials credentials = 3 [(validate.rules).message = {required: true}]; // The redirect URI passed to the authorization endpoint. Supports header formatting // tokens. 
For more information, including details on header value syntax, see the // documentation on :ref:`custom request headers `. // // This URI should not contain any query parameters. string redirect_uri = 4 [(validate.rules).string = {min_len: 1}]; // Matching criteria used to determine whether a path appears to be the result of a redirect from the authorization server. type.matcher.v4alpha.PathMatcher redirect_path_matcher = 5 [(validate.rules).message = {required: true}]; // The path to sign a user out, clearing their credential cookies. type.matcher.v4alpha.PathMatcher signout_path = 6 [(validate.rules).message = {required: true}]; // Forward the OAuth token as a Bearer to upstream web service. bool forward_bearer_token = 7; // Any request that matches any of the provided matchers will be passed through without OAuth validation. repeated config.route.v4alpha.HeaderMatcher pass_through_matcher = 8; } // Filter config. message OAuth2 { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.http.oauth2.v3alpha.OAuth2"; // Leave this empty to disable OAuth2 for a specific route, using per filter config. OAuth2Config config = 1; } ================================================ FILE: api/envoy/extensions/filters/http/on_demand/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/on_demand/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/on_demand/v3/on_demand.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.on_demand.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.on_demand.v3";
option java_outer_classname = "OnDemandProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: OnDemand]
// IP tagging :ref:`configuration overview `.
// [#extension: envoy.filters.http.on_demand]

// On-demand filter config: the message is intentionally empty; the filter is
// enabled by its presence in the filter chain.
message OnDemand {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.on_demand.v2.OnDemand";
}

================================================
FILE: api/envoy/extensions/filters/http/original_src/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/original_src/v2alpha1:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/original_src/v3/original_src.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.original_src.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.original_src.v3";
option java_outer_classname = "OriginalSrcProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Original Src Filter]
// Use the Original source address on upstream connections.

// The Original Src filter binds upstream connections to the original source address determined
// for the request. This address could come from something like the Proxy Protocol filter, or it
// could come from trusted http headers.
// [#extension: envoy.filters.http.original_src]
message OriginalSrc {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.original_src.v2alpha1.OriginalSrc";

  // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to
  // ensure that non-local addresses may be routed back through envoy when binding to the original
  // source address. The option will not be applied if the mark is 0.
  uint32 mark = 1;
}

================================================
FILE: api/envoy/extensions/filters/http/ratelimit/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/rate_limit/v2:pkg",
        "//envoy/config/ratelimit/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/ratelimit/v3/rate_limit.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.ratelimit.v3;

import "envoy/config/ratelimit/v3/rls.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.ratelimit.v3";
option java_outer_classname = "RateLimitProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Rate limit]
// Rate limit :ref:`configuration overview `.
// [#extension: envoy.filters.http.ratelimit]

// [#next-free-field: 9]
message RateLimit {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.rate_limit.v2.RateLimit";

  // Defines the version of the standard to use for X-RateLimit headers.
  enum XRateLimitHeadersRFCVersion {
    // X-RateLimit headers disabled.
    OFF = 0;

    // Use `draft RFC Version 03 `_.
    DRAFT_VERSION_03 = 1;
  }

  // The rate limit domain to use when calling the rate limit service.
  string domain = 1 [(validate.rules).string = {min_len: 1}];

  // Specifies the rate limit configurations to be applied with the same
  // stage number. If not set, the default stage number is 0.
  //
  // .. note::
  //
  //   The filter supports a range of 0 - 10 inclusively for stage numbers.
  uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}];

  // The type of requests the filter should apply to. The supported
  // types are *internal*, *external* or *both*. A request is considered internal if
  // :ref:`x-envoy-internal` is set to true. If
  // :ref:`x-envoy-internal` is not set or false, a
  // request is considered external. The filter defaults to *both*, and it will apply to all request
  // types.
  string request_type = 3
      [(validate.rules).string = {in: "internal" in: "external" in: "both" in: ""}];

  // The timeout in milliseconds for the rate limit service RPC. If not
  // set, this defaults to 20ms.
  google.protobuf.Duration timeout = 4;

  // The filter's behaviour in case the rate limiting service does
  // not respond back. When it is set to true, Envoy will not allow traffic in case of
  // communication failure between rate limiting service and the proxy.
  // Defaults to false.
  bool failure_mode_deny = 5;

  // Specifies whether a `RESOURCE_EXHAUSTED` gRPC code must be returned instead
  // of the default `UNAVAILABLE` gRPC code for a rate limited gRPC call. The
  // HTTP code will be 200 for a gRPC response.
  bool rate_limited_as_resource_exhausted = 6;

  // Configuration for an external rate limit service provider. If not
  // specified, any calls to the rate limit service will immediately return
  // success.
  config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 7
      [(validate.rules).message = {required: true}];

  // Defines the standard version to use for X-RateLimit headers emitted by the filter:
  //
  // * ``X-RateLimit-Limit`` - indicates the request-quota associated to the
  //   client in the current time-window followed by the description of the
  //   quota policy. The values are returned by the rate limiting service in
  //   :ref:`current_limit`
  //   field. Example: `10, 10;w=1;name="per-ip", 1000;w=3600`.
  // * ``X-RateLimit-Remaining`` - indicates the remaining requests in the
  //   current time-window. The values are returned by the rate limiting service
  //   in :ref:`limit_remaining`
  //   field.
  // * ``X-RateLimit-Reset`` - indicates the number of seconds until reset of
  //   the current time-window. The values are returned by the rate limiting service
  //   in :ref:`duration_until_reset`
  //   field.
  //
  // In case rate limiting policy specifies more then one time window, the values
  // above represent the window that is closest to reaching its limit.
  //
  // For more information about the headers specification see selected version of
  // the `draft RFC `_.
  //
  // Disabled by default.
  XRateLimitHeadersRFCVersion enable_x_ratelimit_headers = 8
      [(validate.rules).enum = {defined_only: true}];
}

message RateLimitPerRoute {
  enum VhRateLimitsOptions {
    // Use the virtual host rate limits unless the route has a rate limit policy.
    OVERRIDE = 0;

    // Use the virtual host rate limits even if the route has a rate limit policy.
    INCLUDE = 1;

    // Ignore the virtual host rate limits even if the route does not have a rate limit policy.
    IGNORE = 2;
  }

  // Specifies if the rate limit filter should include the virtual host rate limits.
  VhRateLimitsOptions vh_rate_limits = 1 [(validate.rules).enum = {defined_only: true}];
}

================================================
FILE: api/envoy/extensions/filters/http/rbac/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/rbac/v2:pkg",
        "//envoy/config/rbac/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/rbac/v3/rbac.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.rbac.v3;

import "envoy/config/rbac/v3/rbac.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v3";
option java_outer_classname = "RbacProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: RBAC]
// Role-Based Access Control :ref:`configuration overview `.
// [#extension: envoy.filters.http.rbac]

// RBAC filter config.
message RBAC {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.rbac.v2.RBAC";

  // Specify the RBAC rules to be applied globally.
  // If absent, no enforcing RBAC policy will be applied.
  config.rbac.v3.RBAC rules = 1;

  // Shadow rules are not enforced by the filter (i.e., returning a 403)
  // but will emit stats and logs and can be used for rule testing.
  // If absent, no shadow RBAC policy will be applied.
  config.rbac.v3.RBAC shadow_rules = 2;
}

message RBACPerRoute {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.rbac.v2.RBACPerRoute";

  reserved 1;

  // Override the global configuration of the filter with this new config.
  // If absent, the global RBAC policy will be disabled for this route.
  RBAC rbac = 2;
}

================================================
FILE: api/envoy/extensions/filters/http/rbac/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/rbac/v4alpha:pkg",
        "//envoy/extensions/filters/http/rbac/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/rbac/v4alpha/rbac.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.rbac.v4alpha;

import "envoy/config/rbac/v4alpha/rbac.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.rbac.v4alpha";
option java_outer_classname = "RbacProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: RBAC]
// Role-Based Access Control :ref:`configuration overview `.
// [#extension: envoy.filters.http.rbac]

// RBAC filter config.
message RBAC {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.rbac.v3.RBAC";

  // Specify the RBAC rules to be applied globally.
  // If absent, no enforcing RBAC policy will be applied.
  config.rbac.v4alpha.RBAC rules = 1;

  // Shadow rules are not enforced by the filter (i.e., returning a 403)
  // but will emit stats and logs and can be used for rule testing.
  // If absent, no shadow RBAC policy will be applied.
  config.rbac.v4alpha.RBAC shadow_rules = 2;
}

message RBACPerRoute {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.rbac.v3.RBACPerRoute";

  reserved 1;

  // Override the global configuration of the filter with this new config.
  // If absent, the global RBAC policy will be disabled for this route.
  RBAC rbac = 2;
}

================================================
FILE: api/envoy/extensions/filters/http/router/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/accesslog/v3:pkg",
        "//envoy/config/filter/http/router/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/router/v3/router.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.router.v3;

import "envoy/config/accesslog/v3/accesslog.proto";

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v3";
option java_outer_classname = "RouterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Router]
// Router :ref:`configuration overview `.
// [#extension: envoy.filters.http.router]

// [#next-free-field: 7]
message Router {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.router.v2.Router";

  // Whether the router generates dynamic cluster statistics. Defaults to
  // true. Can be disabled in high performance scenarios.
  google.protobuf.BoolValue dynamic_stats = 1;

  // Whether to start a child span for egress routed calls. This can be
  // useful in scenarios where other filters (auth, ratelimit, etc.) make
  // outbound calls and have child spans rooted at the same ingress
  // parent. Defaults to false.
  bool start_child_span = 2;

  // Configuration for HTTP upstream logs emitted by the router. Upstream logs
  // are configured in the same way as access logs, but each log entry represents
  // an upstream request. Presuming retries are configured, multiple upstream
  // requests may be made for each downstream (inbound) request.
  repeated config.accesslog.v3.AccessLog upstream_log = 3;

  // Do not add any additional *x-envoy-* headers to requests or responses. This
  // only affects the :ref:`router filter generated *x-envoy-* headers
  // `, other Envoy filters and the HTTP
  // connection manager may continue to set *x-envoy-* headers.
  bool suppress_envoy_headers = 4;

  // Specifies a list of HTTP headers to strictly validate. Envoy will reject a
  // request and respond with HTTP status 400 if the request contains an invalid
  // value for any of the headers listed in this field. Strict header checking
  // is only supported for the following headers:
  //
  // Value must be a ','-delimited list (i.e. no spaces) of supported retry
  // policy values:
  //
  // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`
  // * :ref:`config_http_filters_router_x-envoy-retry-on`
  //
  // Value must be an integer:
  //
  // * :ref:`config_http_filters_router_x-envoy-max-retries`
  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`
  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`
  repeated string strict_check_headers = 5 [(validate.rules).repeated = {
    items {
      string {
        in: "x-envoy-upstream-rq-timeout-ms"
        in: "x-envoy-upstream-rq-per-try-timeout-ms"
        in: "x-envoy-max-retries"
        in: "x-envoy-retry-grpc-on"
        in: "x-envoy-retry-on"
      }
    }
  }];

  // If not set, ingress Envoy will ignore
  // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress
  // Envoy, when deriving timeout for upstream cluster.
  bool respect_expected_rq_timeout = 6;
}

================================================
FILE: api/envoy/extensions/filters/http/router/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/accesslog/v4alpha:pkg",
        "//envoy/extensions/filters/http/router/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/router/v4alpha/router.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.router.v4alpha;

import "envoy/config/accesslog/v4alpha/accesslog.proto";

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.router.v4alpha";
option java_outer_classname = "RouterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Router]
// Router :ref:`configuration overview `.
// [#extension: envoy.filters.http.router]

// [#next-free-field: 7]
message Router {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.router.v3.Router";

  // Whether the router generates dynamic cluster statistics. Defaults to
  // true. Can be disabled in high performance scenarios.
  google.protobuf.BoolValue dynamic_stats = 1;

  // Whether to start a child span for egress routed calls. This can be
  // useful in scenarios where other filters (auth, ratelimit, etc.) make
  // outbound calls and have child spans rooted at the same ingress
  // parent. Defaults to false.
  bool start_child_span = 2;

  // Configuration for HTTP upstream logs emitted by the router. Upstream logs
  // are configured in the same way as access logs, but each log entry represents
  // an upstream request. Presuming retries are configured, multiple upstream
  // requests may be made for each downstream (inbound) request.
  repeated config.accesslog.v4alpha.AccessLog upstream_log = 3;

  // Do not add any additional *x-envoy-* headers to requests or responses. This
  // only affects the :ref:`router filter generated *x-envoy-* headers
  // `, other Envoy filters and the HTTP
  // connection manager may continue to set *x-envoy-* headers.
  bool suppress_envoy_headers = 4;

  // Specifies a list of HTTP headers to strictly validate. Envoy will reject a
  // request and respond with HTTP status 400 if the request contains an invalid
  // value for any of the headers listed in this field. Strict header checking
  // is only supported for the following headers:
  //
  // Value must be a ','-delimited list (i.e. no spaces) of supported retry
  // policy values:
  //
  // * :ref:`config_http_filters_router_x-envoy-retry-grpc-on`
  // * :ref:`config_http_filters_router_x-envoy-retry-on`
  //
  // Value must be an integer:
  //
  // * :ref:`config_http_filters_router_x-envoy-max-retries`
  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-timeout-ms`
  // * :ref:`config_http_filters_router_x-envoy-upstream-rq-per-try-timeout-ms`
  repeated string strict_check_headers = 5 [(validate.rules).repeated = {
    items {
      string {
        in: "x-envoy-upstream-rq-timeout-ms"
        in: "x-envoy-upstream-rq-per-try-timeout-ms"
        in: "x-envoy-max-retries"
        in: "x-envoy-retry-grpc-on"
        in: "x-envoy-retry-on"
      }
    }
  }];

  // If not set, ingress Envoy will ignore
  // :ref:`config_http_filters_router_x-envoy-expected-rq-timeout-ms` header, populated by egress
  // Envoy, when deriving timeout for upstream cluster.
  bool respect_expected_rq_timeout = 6;
}

================================================
FILE: api/envoy/extensions/filters/http/squash/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/squash/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/squash/v3/squash.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.squash.v3;

import "google/protobuf/duration.proto";
import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.squash.v3";
option java_outer_classname = "SquashProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Squash]
// Squash :ref:`configuration overview `.
// [#extension: envoy.filters.http.squash]

// [#next-free-field: 6]
message Squash {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.squash.v2.Squash";

  // The name of the cluster that hosts the Squash server.
  string cluster = 1 [(validate.rules).string = {min_len: 1}];

  // When the filter requests the Squash server to create a DebugAttachment, it will use this
  // structure as template for the body of the request. It can contain reference to environment
  // variables in the form of '{{ ENV_VAR_NAME }}'. These can be used to provide the Squash server
  // with more information to find the process to attach the debugger to. For example, in a
  // Istio/k8s environment, this will contain information on the pod:
  //
  // .. code-block:: json
  //
  //   {
  //     "spec": {
  //       "attachment": {
  //         "pod": "{{ POD_NAME }}",
  //         "namespace": "{{ POD_NAMESPACE }}"
  //       },
  //       "match_request": true
  //     }
  //   }
  //
  // (where POD_NAME, POD_NAMESPACE are configured in the pod via the Downward API)
  google.protobuf.Struct attachment_template = 2;

  // The timeout for individual requests sent to the Squash cluster. Defaults to 1 second.
  google.protobuf.Duration request_timeout = 3;

  // The total timeout Squash will delay a request and wait for it to be attached. Defaults to 60
  // seconds.
  google.protobuf.Duration attachment_timeout = 4;

  // Amount of time to poll for the status of the attachment object in the Squash server
  // (to check if has been attached). Defaults to 1 second.
  google.protobuf.Duration attachment_poll_period = 5;
}

================================================
FILE: api/envoy/extensions/filters/http/tap/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/http/tap/v2alpha:pkg",
        "//envoy/extensions/common/tap/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/tap/v3/tap.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.tap.v3;

import "envoy/extensions/common/tap/v3/common.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v3";
option java_outer_classname = "TapProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Tap]
// Tap :ref:`configuration overview `.
// [#extension: envoy.filters.http.tap]

// Top level configuration for the tap filter.
message Tap {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.http.tap.v2alpha.Tap";

  // Common configuration for the HTTP tap filter.
  common.tap.v3.CommonExtensionConfig common_config = 1
      [(validate.rules).message = {required: true}];
}

================================================
FILE: api/envoy/extensions/filters/http/tap/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/extensions/common/tap/v4alpha:pkg",
        "//envoy/extensions/filters/http/tap/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/tap/v4alpha/tap.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.tap.v4alpha;

import "envoy/extensions/common/tap/v4alpha/common.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.tap.v4alpha";
option java_outer_classname = "TapProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Tap]
// Tap :ref:`configuration overview `.
// [#extension: envoy.filters.http.tap]

// Top level configuration for the tap filter.
message Tap {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.http.tap.v3.Tap";

  // Common configuration for the HTTP tap filter.
  common.tap.v4alpha.CommonExtensionConfig common_config = 1
      [(validate.rules).message = {required: true}];
}

================================================
FILE: api/envoy/extensions/filters/http/wasm/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/extensions/wasm/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/http/wasm/v3/wasm.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.http.wasm.v3;

import "envoy/extensions/wasm/v3/wasm.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.http.wasm.v3";
option java_outer_classname = "WasmProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Wasm]
// [#extension: envoy.filters.http.wasm]
// Wasm :ref:`configuration overview `.

message Wasm {
  // General Plugin configuration.
  envoy.extensions.wasm.v3.PluginConfig config = 1;
}

================================================
FILE: api/envoy/extensions/filters/listener/http_inspector/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/listener/http_inspector/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/listener/http_inspector/v3/http_inspector.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.listener.http_inspector.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.http_inspector.v3"; option java_outer_classname = "HttpInspectorProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP Inspector Filter] // Detect whether the application protocol is HTTP. // [#extension: envoy.filters.listener.http_inspector] message HttpInspector { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.http_inspector.v2.HttpInspector"; } ================================================ FILE: api/envoy/extensions/filters/listener/original_dst/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/listener/original_dst/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/listener/original_dst/v3/original_dst.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.listener.original_dst.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_dst.v3"; option java_outer_classname = "OriginalDstProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Original Dst Filter] // Use the Original destination address on downstream connections. // [#extension: envoy.filters.listener.original_dst] message OriginalDst { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.original_dst.v2.OriginalDst"; } ================================================ FILE: api/envoy/extensions/filters/listener/original_src/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/listener/original_src/v2alpha1:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/listener/original_src/v3/original_src.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.listener.original_src.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.original_src.v3"; option java_outer_classname = "OriginalSrcProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Original Src Filter] // Use the Original source address on upstream connections. // [#extension: envoy.filters.listener.original_src] // The Original Src filter binds upstream connections to the original source address determined // for the connection. This address could come from something like the Proxy Protocol filter, or it // could come from trusted http headers. message OriginalSrc { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.original_src.v2alpha1.OriginalSrc"; // Whether to bind the port to the one used in the original downstream connection. // [#not-implemented-hide:] bool bind_port = 1; // Sets the SO_MARK option on the upstream connection's socket to the provided value. Used to // ensure that non-local addresses may be routed back through envoy when binding to the original // source address. The option will not be applied if the mark is 0. uint32 mark = 2; } ================================================ FILE: api/envoy/extensions/filters/listener/proxy_protocol/v3/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/listener/proxy_protocol/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/listener/proxy_protocol/v3/proxy_protocol.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.listener.proxy_protocol.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.proxy_protocol.v3"; option java_outer_classname = "ProxyProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Proxy Protocol Filter] // PROXY protocol listener filter. // [#extension: envoy.filters.listener.proxy_protocol] message ProxyProtocol { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.proxy_protocol.v2.ProxyProtocol"; message KeyValuePair { // The namespace — if this is empty, the filter's namespace will be used. string metadata_namespace = 1; // The key to use within the namespace. string key = 2 [(validate.rules).string = {min_len: 1}]; } // A Rule defines what metadata to apply when a header is present or missing. message Rule { // The type that triggers the rule - required // TLV type is defined as uint8_t in proxy protocol. See `the spec // `_ for details. uint32 tlv_type = 1 [(validate.rules).uint32 = {lt: 256}]; // If the TLV type is present, apply this metadata KeyValuePair. KeyValuePair on_tlv_present = 2; } // The list of rules to apply to requests. 
repeated Rule rules = 1; } ================================================ FILE: api/envoy/extensions/filters/listener/tls_inspector/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/listener/tls_inspector/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/listener/tls_inspector/v3/tls_inspector.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.listener.tls_inspector.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.listener.tls_inspector.v3"; option java_outer_classname = "TlsInspectorProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: TLS Inspector Filter] // Allows detecting whether the transport appears to be TLS or plaintext. // [#extension: envoy.filters.listener.tls_inspector] message TlsInspector { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.listener.tls_inspector.v2.TlsInspector"; } ================================================ FILE: api/envoy/extensions/filters/network/client_ssl_auth/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/client_ssl_auth/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/client_ssl_auth/v3/client_ssl_auth.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.client_ssl_auth.v3; import "envoy/config/core/v3/address.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.client_ssl_auth.v3"; option java_outer_classname = "ClientSslAuthProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client TLS authentication] // Client TLS authentication // :ref:`configuration overview `. // [#extension: envoy.filters.network.client_ssl_auth] message ClientSSLAuth { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.client_ssl_auth.v2.ClientSSLAuth"; // The :ref:`cluster manager ` cluster that runs // the authentication service. The filter will connect to the service every 60s to fetch the list // of principals. The service must support the expected :ref:`REST API // `. string auth_api_cluster = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; // The prefix to use when emitting :ref:`statistics // `. string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; // Time in milliseconds between principal refreshes from the // authentication service. Default is 60000 (60s). 
The actual fetch time // will be this value plus a random jittered value between // 0-refresh_delay_ms milliseconds. google.protobuf.Duration refresh_delay = 3; // An optional list of IP address and subnet masks that should be white // listed for access by the filter. If no list is provided, there is no // IP allowlist. repeated config.core.v3.CidrRange ip_white_list = 4 [(udpa.annotations.field_migrate).rename = "ip_allowlist"]; } ================================================ FILE: api/envoy/extensions/filters/network/direct_response/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/direct_response/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/direct_response/v3/config.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.direct_response.v3; import "envoy/config/core/v3/base.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.direct_response.v3"; option java_outer_classname = "ConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Direct response] // Direct response :ref:`configuration overview `. // [#extension: envoy.filters.network.direct_response] message Config { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.direct_response.v2.Config"; // Response data as a data source. 
config.core.v3.DataSource response = 1; } ================================================ FILE: api/envoy/extensions/filters/network/dubbo_proxy/router/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/dubbo/router/v2alpha1:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/dubbo_proxy/router/v3/router.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.dubbo_proxy.router.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.router.v3"; option java_outer_classname = "RouterProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Router] // Dubbo router :ref:`configuration overview `. message Router { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.dubbo.router.v2alpha1.Router"; } ================================================ FILE: api/envoy/extensions/filters/network/dubbo_proxy/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/dubbo_proxy/v3/README.md ================================================ Protocol buffer definitions for the Dubbo proxy. ================================================ FILE: api/envoy/extensions/filters/network/dubbo_proxy/v3/dubbo_proxy.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.dubbo_proxy.v3; import "envoy/extensions/filters/network/dubbo_proxy/v3/route.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; option java_outer_classname = "DubboProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.dubbo_proxy] // Dubbo Protocol types supported by Envoy. enum ProtocolType { // the default protocol. Dubbo = 0; } // Dubbo Serialization types supported by Envoy. enum SerializationType { // the default serialization protocol. Hessian2 = 0; } // [#next-free-field: 6] message DubboProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboProxy"; // The human readable prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Configure the protocol used. 
ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; // Configure the serialization protocol used. SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; // The route table for the connection manager is static and is specified in this property. repeated RouteConfiguration route_config = 4; // A list of individual Dubbo filters that make up the filter chain for requests made to the // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards // compatibility, if no dubbo_filters are specified, a default Dubbo router filter // (`envoy.filters.dubbo.router`) is used. repeated DubboFilter dubbo_filters = 5; } // DubboFilter configures a Dubbo filter. message DubboFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.dubbo_proxy.v2alpha1.DubboFilter"; // The name of the filter to instantiate. The name must match a supported // filter. string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. 
google.protobuf.Any config = 2; } ================================================ FILE: api/envoy/extensions/filters/network/dubbo_proxy/v3/route.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.dubbo_proxy.v3; import "envoy/config/route/v3/route_components.proto"; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/v3/range.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Dubbo Proxy Route Configuration] // Dubbo Proxy :ref:`configuration overview `. // [#next-free-field: 6] message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteConfiguration"; // The name of the route configuration. Reserved for future use in asynchronous route discovery. string name = 1; // The interface name of the service. string interface = 2; // Which group does the interface belong to. string group = 3; // The version number of the interface. string version = 4; // The list of routes that will be matched, in order, against incoming requests. The first route // that matches will be used. repeated Route routes = 5; } message Route { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.dubbo_proxy.v2alpha1.Route"; // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. 
RouteAction route = 2 [(validate.rules).message = {required: true}]; } message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteMatch"; // Method level routing matching. MethodMatch method = 1; // Specifies a set of headers that the route should match on. The router will check the request’s // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). repeated config.route.v3.HeaderMatcher headers = 2; } message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.dubbo_proxy.v2alpha1.RouteAction"; oneof cluster_specifier { option (validate.required) = true; // Indicates the upstream cluster to which the request should be routed. string cluster = 1; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. // Currently ClusterWeight only supports the name and weight fields. config.route.v3.WeightedCluster weighted_clusters = 2; } } message MethodMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch"; // The parameter matching type. message ParameterMatchSpecifier { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.dubbo_proxy.v2alpha1.MethodMatch.ParameterMatchSpecifier"; oneof parameter_match_specifier { // If specified, header match will be performed based on the value of the header. string exact_match = 3; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. 
// The entire request header value must represent an integer in base 10 notation: consisting
      // of an optional plus or minus sign followed by a sequence of digits. The rule will not match
      // if the header value does not represent an integer. Match will fail for empty values,
      // floating point numbers or if only a subsequence of the header value is an integer.
      //
      // Examples:
      //
      // * For range [-10,0), route will match for header value -1, but not for 0,
      //   "somestring", 10.9, "-1somestring"
      type.v3.Int64Range range_match = 4;
    }
  }

  // The name of the method.
  type.matcher.v3.StringMatcher name = 1;

  // Method parameter definition.
  // The key is the parameter index, starting from 0.
  // The value is the parameter matching type.
  map<uint32, ParameterMatchSpecifier> params_match = 2;
}

================================================
FILE: api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/route/v4alpha:pkg",
        "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg",
        "//envoy/type/matcher/v4alpha:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/dubbo_proxy.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.dubbo_proxy.v4alpha;

import "envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto";

import "google/protobuf/any.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha";
option java_outer_classname = "DubboProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Dubbo Proxy] // Dubbo Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.dubbo_proxy] // Dubbo Protocol types supported by Envoy. enum ProtocolType { // the default protocol. Dubbo = 0; } // Dubbo Serialization types supported by Envoy. enum SerializationType { // the default serialization protocol. Hessian2 = 0; } // [#next-free-field: 6] message DubboProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.dubbo_proxy.v3.DubboProxy"; // The human readable prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Configure the protocol used. ProtocolType protocol_type = 2 [(validate.rules).enum = {defined_only: true}]; // Configure the serialization protocol used. SerializationType serialization_type = 3 [(validate.rules).enum = {defined_only: true}]; // The route table for the connection manager is static and is specified in this property. repeated RouteConfiguration route_config = 4; // A list of individual Dubbo filters that make up the filter chain for requests made to the // Dubbo proxy. Order matters as the filters are processed sequentially. For backwards // compatibility, if no dubbo_filters are specified, a default Dubbo router filter // (`envoy.filters.dubbo.router`) is used. repeated DubboFilter dubbo_filters = 5; } // DubboFilter configures a Dubbo filter. message DubboFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.dubbo_proxy.v3.DubboFilter"; // The name of the filter to instantiate. The name must match a supported // filter. string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being // instantiated. See the supported filters for further documentation. 
google.protobuf.Any config = 2; } ================================================ FILE: api/envoy/extensions/filters/network/dubbo_proxy/v4alpha/route.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.dubbo_proxy.v4alpha; import "envoy/config/route/v4alpha/route_components.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/v3/range.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.dubbo_proxy.v4alpha"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Dubbo Proxy Route Configuration] // Dubbo Proxy :ref:`configuration overview `. // [#next-free-field: 6] message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.dubbo_proxy.v3.RouteConfiguration"; // The name of the route configuration. Reserved for future use in asynchronous route discovery. string name = 1; // The interface name of the service. string interface = 2; // Which group does the interface belong to. string group = 3; // The version number of the interface. string version = 4; // The list of routes that will be matched, in order, against incoming requests. The first route // that matches will be used. repeated Route routes = 5; } message Route { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.dubbo_proxy.v3.Route"; // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. 
RouteAction route = 2 [(validate.rules).message = {required: true}]; } message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.dubbo_proxy.v3.RouteMatch"; // Method level routing matching. MethodMatch method = 1; // Specifies a set of headers that the route should match on. The router will check the request’s // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). repeated config.route.v4alpha.HeaderMatcher headers = 2; } message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.dubbo_proxy.v3.RouteAction"; oneof cluster_specifier { option (validate.required) = true; // Indicates the upstream cluster to which the request should be routed. string cluster = 1; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. // Currently ClusterWeight only supports the name and weight fields. config.route.v4alpha.WeightedCluster weighted_clusters = 2; } } message MethodMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch"; // The parameter matching type. message ParameterMatchSpecifier { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.dubbo_proxy.v3.MethodMatch.ParameterMatchSpecifier"; oneof parameter_match_specifier { // If specified, header match will be performed based on the value of the header. string exact_match = 3; // If specified, header match will be performed based on range. // The rule will match if the request header value is within this range. 
// The entire request header value must represent an integer in base 10 notation: consisting
      // of an optional plus or minus sign followed by a sequence of digits. The rule will not match
      // if the header value does not represent an integer. Match will fail for empty values,
      // floating point numbers or if only a subsequence of the header value is an integer.
      //
      // Examples:
      //
      // * For range [-10,0), route will match for header value -1, but not for 0,
      //   "somestring", 10.9, "-1somestring"
      type.v3.Int64Range range_match = 4;
    }
  }

  // The name of the method.
  type.matcher.v4alpha.StringMatcher name = 1;

  // Method parameter definition.
  // The key is the parameter index, starting from 0.
  // The value is the parameter matching type.
  map<uint32, ParameterMatchSpecifier> params_match = 2;
}

================================================
FILE: api/envoy/extensions/filters/network/echo/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/network/echo/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/network/echo/v3/echo.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.echo.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.echo.v3";
option java_outer_classname = "EchoProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Echo]
// Echo :ref:`configuration overview <config_network_filters_echo>`.
// [#extension: envoy.filters.network.echo] message Echo { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.echo.v2.Echo"; } ================================================ FILE: api/envoy/extensions/filters/network/ext_authz/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/ext_authz/v2:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/ext_authz/v3/ext_authz.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.ext_authz.v3; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/grpc_service.proto"; import "envoy/type/matcher/v3/metadata.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v3"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Network External Authorization ] // The network layer external authorization service configuration // :ref:`configuration overview `. // [#extension: envoy.filters.network.ext_authz] // External Authorization filter calls out to an external service over the // gRPC Authorization API defined by // :ref:`CheckRequest `. // A failed check will cause this filter to close the TCP connection. 
// [#next-free-field: 7] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.ext_authz.v2.ExtAuthz"; // The prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The external authorization gRPC service configuration. // The default timeout is set to 200ms by this filter. config.core.v3.GrpcService grpc_service = 2; // The filter's behaviour in case the external authorization service does // not respond back. When it is set to true, Envoy will also allow traffic in case of // communication failure between authorization service and the proxy. // Defaults to false. bool failure_mode_allow = 3; // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 4; // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and // version of Check{Request,Response} used on the wire. config.core.v3.ApiVersion transport_api_version = 5 [(validate.rules).enum = {defined_only: true}]; // Specifies if the filter is enabled with metadata matcher. // If this field is not specified, the filter will be enabled for all requests. type.matcher.v3.MetadataMatcher filter_enabled_metadata = 6; } ================================================ FILE: api/envoy/extensions/filters/network/ext_authz/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/filters/network/ext_authz/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/ext_authz/v4alpha/ext_authz.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.ext_authz.v4alpha; import "envoy/config/core/v4alpha/config_source.proto"; import "envoy/config/core/v4alpha/grpc_service.proto"; import "envoy/type/matcher/v4alpha/metadata.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.ext_authz.v4alpha"; option java_outer_classname = "ExtAuthzProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Network External Authorization ] // The network layer external authorization service configuration // :ref:`configuration overview `. // [#extension: envoy.filters.network.ext_authz] // External Authorization filter calls out to an external service over the // gRPC Authorization API defined by // :ref:`CheckRequest `. // A failed check will cause this filter to close the TCP connection. // [#next-free-field: 7] message ExtAuthz { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.ext_authz.v3.ExtAuthz"; // The prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The external authorization gRPC service configuration. // The default timeout is set to 200ms by this filter. 
config.core.v4alpha.GrpcService grpc_service = 2; // The filter's behaviour in case the external authorization service does // not respond back. When it is set to true, Envoy will also allow traffic in case of // communication failure between authorization service and the proxy. // Defaults to false. bool failure_mode_allow = 3; // Specifies if the peer certificate is sent to the external service. // // When this field is true, Envoy will include the peer X.509 certificate, if available, in the // :ref:`certificate`. bool include_peer_certificate = 4; // API version for ext_authz transport protocol. This describes the ext_authz gRPC endpoint and // version of Check{Request,Response} used on the wire. config.core.v4alpha.ApiVersion transport_api_version = 5 [(validate.rules).enum = {defined_only: true}]; // Specifies if the filter is enabled with metadata matcher. // If this field is not specified, the filter will be enabled for all requests. type.matcher.v4alpha.MetadataMatcher filter_enabled_metadata = 6; } ================================================ FILE: api/envoy/extensions/filters/network/http_connection_manager/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/config/trace/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.http_connection_manager.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/config/core/v3/extension.proto"; import "envoy/config/core/v3/protocol.proto"; import "envoy/config/core/v3/substitution_format_string.proto"; import "envoy/config/route/v3/route.proto"; import "envoy/config/route/v3/scoped_route.proto"; import "envoy/config/trace/v3/http_tracer.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/core/v1/resource_locator.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3"; option java_outer_classname = "HttpConnectionManagerProto"; option java_multiple_files = true; option 
(udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] // [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager"; enum CodecType { // For every new connection, the connection manager will determine which // codec to use. This mode supports both ALPN for TLS listeners as well as // protocol inference for plaintext listeners. If ALPN data is available, it // is preferred, otherwise protocol inference is used. In almost all cases, // this is the right option to choose for this setting. AUTO = 0; // The connection manager will assume that the client is speaking HTTP/1.1. HTTP1 = 1; // The connection manager will assume that the client is speaking HTTP/2 // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. // Prior knowledge is allowed). HTTP2 = 2; // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient // to distinguish HTTP1 and HTTP2 traffic. HTTP3 = 3; } enum ServerHeaderTransformation { // Overwrite any Server header with the contents of server_name. OVERWRITE = 0; // If no Server header is present, append Server server_name // If a Server header is present, pass it through. APPEND_IF_ABSENT = 1; // Pass through the value of the server header, and do not append a header // if none is present. PASS_THROUGH = 2; } // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. enum ForwardClientCertDetails { // Do not send the XFCC header to the next hop. This is the default value. 
SANITIZE = 0; // When the client connection is mTLS (Mutual TLS), forward the XFCC header // in the request. FORWARD_ONLY = 1; // When the client connection is mTLS, append the client certificate // information to the request’s XFCC header and forward it. APPEND_FORWARD = 2; // When the client connection is mTLS, reset the XFCC header with the client // certificate information and send it to the next hop. SANITIZE_SET = 3; // Always forward the XFCC header in the request, regardless of whether the // client connection is mTLS. ALWAYS_FORWARD_ONLY = 4; } // [#next-free-field: 10] message Tracing { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager.Tracing"; enum OperationName { // The HTTP listener is used for ingress/incoming requests. INGRESS = 0; // The HTTP listener is used for egress/outgoing requests. EGRESS = 1; } reserved 1, 2; reserved "operation_name", "request_headers_for_tags"; // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. This field is a direct analog for the runtime variable // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. // Default: 100% type.v3.Percent client_sampling = 3; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% type.v3.Percent random_sampling = 4; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random // sampling). This field functions as an upper limit on the total configured sampling rate. 
For // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% // of client requests with the appropriate headers to be force traced. This field is a direct // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. // Default: 100% type.v3.Percent overall_sampling = 5; // Whether to annotate spans with additional data. If true, spans will include logs for stream // events. bool verbose = 6; // Maximum length of the request path to extract and include in the HttpUrl tag. Used to // truncate lengthy request paths to meet the needs of a tracing backend. // Default: 256 google.protobuf.UInt32Value max_path_tag_length = 7; // A list of custom tags with unique tag name to create tags for the active span. repeated type.tracing.v3.CustomTag custom_tags = 8; // Configuration for an external tracing provider. // If not specified, no tracing will be performed. // // .. attention:: // Please be aware that *envoy.tracers.opencensus* provider can only be configured once // in Envoy lifetime. // Any attempts to reconfigure it or to use different configurations for different HCM filters // will be rejected. // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes // on OpenCensus side. config.trace.v3.Tracing.Http provider = 9; } message InternalAddressConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." "InternalAddressConfig"; // Whether unix socket addresses should be considered internal. bool unix_sockets = 1; } // [#next-free-field: 7] message SetCurrentClientCertDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." "SetCurrentClientCertDetails"; reserved 2; // Whether to forward the subject of the client cert. Defaults to false. 
google.protobuf.BoolValue subject = 1; // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the // XFCC header comma separated from other values with the value Cert="PEM". // Defaults to false. bool cert = 3; // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM // format. This will appear in the XFCC header comma separated from other values with the value // Chain="PEM". // Defaults to false. bool chain = 6; // Whether to forward the DNS type Subject Alternative Names of the client cert. // Defaults to false. bool dns = 4; // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to // false. bool uri = 5; } // The configuration for HTTP upgrades. // For each upgrade type desired, an UpgradeConfig must be added. // // .. warning:: // // The current implementation of upgrade headers does not handle // multi-valued upgrade headers. Support for multi-valued headers may be // added in the future if needed. // // .. warning:: // The current implementation of upgrade headers does not work with HTTP/2 // upstreams. message UpgradeConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager." "UpgradeConfig"; // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] // will be proxied upstream. string upgrade_type = 1; // If present, this represents the filter chain which will be created for // this type of upgrade. If no filters are present, the filter chain for // HTTP connections will be used for this upgrade type. repeated HttpFilter filters = 2; // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the // :ref:`upgrade documentation `. 
google.protobuf.BoolValue enabled = 3; } reserved 27, 11; reserved "idle_timeout"; // Supplies the type of codec that the connection manager should use. CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; // The human readable prefix to use when emitting statistics for the // connection manager. See the :ref:`statistics documentation ` for // more information. string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; oneof route_specifier { option (validate.required) = true; // The connection manager’s route table will be dynamically loaded via the RDS API. Rds rds = 3; // The route table for the connection manager is static and is specified in this property. config.route.v3.RouteConfiguration route_config = 4; // A route table will be dynamically assigned to each request based on request attributes // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are // specified in this message. ScopedRoutes scoped_routes = 31; } // A list of individual HTTP filters that make up the filter chain for // requests made to the connection manager. :ref:`Order matters ` // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked // documentation for more information. Defaults to false. google.protobuf.BoolValue add_user_agent = 6; // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. Tracing tracing = 7; // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. 
config.core.v3.HttpProtocolOptions common_http_protocol_options = 35 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v3.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. config.core.v3.Http2ProtocolOptions http2_protocol_options = 9 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. string server_name = 10 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in // server_name. ServerHeaderTransformation server_header_transformation = 34 [(validate.rules).enum = {defined_only: true}]; // The maximum request headers size for incoming connections. // If unconfigured, the default max request headers allowed is 60 KiB. // Requests that exceed this limit will receive a 431 response. // The max configurable limit is 96 KiB, based on current implementation // constraints. google.protobuf.UInt32Value max_request_headers_kb = 29 [(validate.rules).uint32 = {lte: 96 gt: 0}]; // The stream idle timeout for connections managed by the connection manager. // If not specified, this defaults to 5 minutes. The default value was selected // so as not to interfere with any smaller configured timeouts that may have // existed in configurations prior to the introduction of this feature, while // introducing robustness to TCP connections that terminate without a FIN. // // This idle timeout applies to new streams and is overridable by the // :ref:`route-level idle_timeout // `. 
Even on a stream in // which the override applies, prior to receipt of the initial request // headers, the :ref:`stream_idle_timeout // ` // applies. Each time an encode/decode event for headers or data is processed // for the stream, the timer will be reset. If the timeout fires, the stream // is terminated with a 408 Request Timeout error code if no upstream response // header has been received, otherwise a stream reset occurs. // // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough // window to write any remaining stream data once the entirety of stream data (local end stream is // true) has been buffered pending available window. In other words, this timeout defends against // a peer that does not release enough window to completely write the stream, even though all // data has been proxied within available flow control windows. If the timeout is hit in this // case, the :ref:`tx_flush_timeout ` counter will be // incremented. Note that :ref:`max_stream_duration // ` does not apply to // this corner case. // // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due // to the granularity of events presented to the connection manager. For example, while receiving // very large request headers, it may be the case that there is traffic regularly arriving on the // wire while the connection manage is only able to observe the end-of-headers event, hence the // stream may still idle timeout. // // A value of 0 will completely disable the connection manager stream idle // timeout, although per-route idle timeout overrides will continue to apply. google.protobuf.Duration stream_idle_timeout = 24 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The amount of time that Envoy will wait for the entire request to be received. // The timer is activated when the request is initiated, and is disarmed when the last byte of the // request is sent upstream (i.e. 
all decoding filters have processed the request), OR when the // response is initiated. If not specified or set to 0, this timeout is disabled. google.protobuf.Duration request_timeout = 28 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The time that Envoy will wait between sending an HTTP/2 “shutdown // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame. // This is used so that Envoy provides a grace period for new streams that // race with the final GOAWAY frame. During this grace period, Envoy will // continue to accept new streams. After the grace period, a final GOAWAY // frame is sent and Envoy will start refusing new streams. Draining occurs // both when a connection hits the idle timeout or during general server // draining. The default grace period is 5000 milliseconds (5 seconds) if this // option is not specified. google.protobuf.Duration drain_timeout = 12; // The delayed close timeout is for downstream connections managed by the HTTP connection manager. // It is defined as a grace period after connection close processing has been locally initiated // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy // from the downstream connection) prior to Envoy closing the socket associated with that // connection. // NOTE: This timeout is enforced even when the socket associated with the downstream connection // is pending a flush of the write buffer. However, any progress made writing data to the socket // will restart the timer associated with this timeout. This means that the total grace period for // a socket in this state will be // +. // // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close // sequence mitigates a race condition that exists when downstream clients do not drain/process // data in a connection's receive buffer after a remote close has been detected via a socket // write(). 
This race leads to such clients failing to process the response code sent by Envoy, // which could result in erroneous downstream processing. // // If the timeout triggers, Envoy will close the connection's socket. // // The default timeout is 1000 ms if this option is not specified. // // .. NOTE:: // To be useful in avoiding the race condition described above, this timeout must be set // to *at least* +<100ms to account for // a reasonable "worst" case processing time for a full iteration of Envoy's event loop>. // // .. WARNING:: // A value of 0 will completely disable delayed close processing. When disabled, the downstream // connection's socket will be closed immediately after the write flush is completed or will // never close if the write flush does not complete. google.protobuf.Duration delayed_close_timeout = 26; // Configuration for :ref:`HTTP access logs ` // emitted by the connection manager. repeated config.accesslog.v3.AccessLog access_log = 13; // If set to true, the connection manager will use the real remote address // of the client connection when determining internal versus external origin and manipulating // various headers. If set to false or absent, the connection manager will use the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for // :ref:`config_http_conn_man_headers_x-forwarded-for`, // :ref:`config_http_conn_man_headers_x-envoy-internal`, and // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information. google.protobuf.BoolValue use_remote_address = 14 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // The number of additional ingress proxy hops from the right side of the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when // determining the origin client's IP address. The default is zero if this option // is not specified. 
See the documentation for // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information. uint32 xff_num_trusted_hops = 19; // Configures what network addresses are considered internal for stats and header sanitation // purposes. If unspecified, only RFC1918 IP addresses will be considered internal. // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more // information about internal/external addresses. InternalAddressConfig internal_address_config = 25; // If set, Envoy will not append the remote address to the // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager // has mutated the request headers. While :ref:`use_remote_address // ` // will also suppress XFF addition, it has consequences for logging and other // Envoy uses of the remote address, so *skip_xff_append* should be used // when only an elision of XFF addition is intended. bool skip_xff_append = 21; // Via header value to append to request and response headers. If this is // empty, no via header will be appended. string via = 22; // Whether the connection manager will generate the :ref:`x-request-id // ` header if it does not exist. This defaults to // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature // is not desired it can be disabled. google.protobuf.BoolValue generate_request_id = 15; // Whether the connection manager will keep the :ref:`x-request-id // ` header if passed for a request that is edge // (Edge request is the request from external clients to front Envoy) and not reset it, which // is the current Envoy behaviour. This defaults to false. bool preserve_external_request_id = 32; // If set, Envoy will always set :ref:`x-request-id ` header in response. 
// If this is false or not set, the request ID is returned in responses only if tracing is forced using // :ref:`x-envoy-force-trace ` header. bool always_set_request_id_in_response = 37; // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. ForwardClientCertDetails forward_client_cert_details = 16 [(validate.rules).enum = {defined_only: true}]; // This field is valid only when :ref:`forward_client_cert_details // ` // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in // the client certificate to be forwarded. Note that in the // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and // *By* is always set when the client certificate presents the URI type Subject Alternative Name // value. SetCurrentClientCertDetails set_current_client_cert_details = 17; // If proxy_100_continue is true, Envoy will proxy incoming "Expect: // 100-continue" headers upstream, and forward "100 Continue" responses // downstream. If this is false or not set, Envoy will instead strip the // "Expect: 100-continue" header, and send a "100 Continue" response itself. bool proxy_100_continue = 18; // If // :ref:`use_remote_address // ` // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*. // This is useful for testing compatibility of upstream services that parse the header value. For // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses // `_ for details. This will also affect the // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6 // ` for runtime // control. 
// [#not-implemented-hide:] bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20; repeated UpgradeConfig upgrade_configs = 23; // Should paths be normalized according to RFC 3986 before any processing of // requests by HTTP filters or routing? This affects the upstream *:path* header // as well. For paths that fail this check, Envoy will respond with 400 to // paths that are malformed. This defaults to false currently but will default // true in the future. When not specified, this value may be overridden by the // runtime variable // :ref:`http_connection_manager.normalize_path`. // See `Normalization and Comparison `_ // for details of normalization. // Note that Envoy does not perform // `case normalization `_ google.protobuf.BoolValue normalize_path = 30; // Determines if adjacent slashes in the path are merged into one before any processing of // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without // setting this option, incoming requests with path `//dir///file` will not match against route // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of // `HTTP spec `_ and is provided for convenience. bool merge_slashes = 33; // The configuration of the request ID extension. This includes operations such as // generation, validation, and associated tracing operations. // // If not set, Envoy uses the default UUID-based behavior: // // 1. Request ID is propagated using *x-request-id* header. // // 2. Request ID is a universally unique identifier (UUID). // // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID. RequestIDExtension request_id_extension = 36; // The configuration to customize local reply returned by Envoy. It can customize status code, // body text and response content type. If not specified, status code and text body are hard // coded in Envoy, the response content type is plain text. 
LocalReplyConfig local_reply_config = 38;

  // Determines if the port part should be removed from host/authority header before any processing
  // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's`
  // local port and request method is not CONNECT. This affects the upstream host header as well.
  // Without setting this option, incoming requests with host `example:443` will not match against
  // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part
  // of `HTTP spec `_ and is provided for convenience.
  bool strip_matching_host_port = 39;

  // Governs Envoy's behavior when receiving invalid HTTP from downstream.
  // If this option is false (default), Envoy will err on the conservative side handling HTTP
  // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request.
  // If this option is set to true, Envoy will be more permissive, only resetting the invalid
  // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire
  // request is read for HTTP/1.1).
  // In general this should be true for deployments receiving trusted traffic (L2 Envoys,
  // company-internal mesh) and false when receiving untrusted traffic (edge deployments).
  //
  // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are
  // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message
  // ` or the new HTTP/2 option
  // :ref:`override_stream_error_on_invalid_http_message
  // `
  // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging
  // `
  google.protobuf.BoolValue stream_error_on_invalid_http_message = 40;
}

// The configuration to customize local reply returned by Envoy.
message LocalReplyConfig {
  // Configuration of list of mappers which allows to filter and change local response.
  // The mappers will be checked by the specified order until one is matched.
repeated ResponseMapper mappers = 1;

  // The configuration to form response body from the :ref:`command operators `
  // and to specify response content type as one of: text/plain or application/json.
  //
  // Example one: "text/plain" ``body_format``.
  //
  // .. validated-code-block:: yaml
  //   :type-name: envoy.config.core.v3.SubstitutionFormatString
  //
  //   text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
  //
  // The following response body in "text/plain" format will be generated for a request with
  // local reply body of "upstream connection error", response_code=503 and path=/foo.
  //
  // .. code-block:: text
  //
  //   upstream connection error:503:path=/foo
  //
  // Example two: "application/json" ``body_format``.
  //
  // .. validated-code-block:: yaml
  //   :type-name: envoy.config.core.v3.SubstitutionFormatString
  //
  //   json_format:
  //     status: "%RESPONSE_CODE%"
  //     message: "%LOCAL_REPLY_BODY%"
  //     path: "%REQ(:path)%"
  //
  // The following response body in "application/json" format would be generated for a request with
  // local reply body of "upstream connection error", response_code=503 and path=/foo.
  //
  // .. code-block:: json
  //
  //  {
  //      "status": 503,
  //      "message": "upstream connection error",
  //      "path": "/foo"
  //  }
  //
  config.core.v3.SubstitutionFormatString body_format = 2;
}

// The configuration to filter and change local response.
// [#next-free-field: 6]
message ResponseMapper {
  // Filter to determine if this mapper should apply.
  config.accesslog.v3.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}];

  // The new response status code if specified.
  google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];

  // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%`
  // command operator in the `body_format`.
  config.core.v3.DataSource body = 3;

  // A per mapper `body_format` to override the :ref:`body_format `.
  // It will be used when this mapper is matched.
config.core.v3.SubstitutionFormatString body_format_override = 4;

  // HTTP headers to add to a local reply. This allows the response mapper to append, to add
  // or to override headers of any local reply before it is sent to a downstream client.
  repeated config.core.v3.HeaderValueOption headers_to_add = 5
      [(validate.rules).repeated = {max_items: 1000}];
}

message Rds {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.http_connection_manager.v2.Rds";

  // Configuration source specifier for RDS.
  config.core.v3.ConfigSource config_source = 1 [(validate.rules).message = {required: true}];

  // The name of the route configuration. This name will be passed to the RDS
  // API. This allows an Envoy configuration with multiple HTTP listeners (and
  // associated HTTP connection manager filters) to use different route
  // configurations.
  string route_config_name = 2
      [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"];

  // Resource locator for RDS. This is mutually exclusive to *route_config_name*.
  // [#not-implemented-hide:]
  udpa.core.v1.ResourceLocator rds_resource_locator = 3
      [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"];
}

// This message is used to work around the limitations with 'oneof' and repeated fields.
message ScopedRouteConfigurationsList {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.http_connection_manager.v2.ScopedRouteConfigurationsList";

  repeated config.route.v3.ScopedRouteConfiguration scoped_route_configurations = 1
      [(validate.rules).repeated = {min_items: 1}];
}

// [#next-free-field: 6]
message ScopedRoutes {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes";

  // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes.
// These keys are matched against a set of :ref:`Key`
  // objects assembled from :ref:`ScopedRouteConfiguration`
  // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via
  // :ref:`scoped_route_configurations_list`.
  //
  // Upon receiving a request's headers, the Router will build a key using the algorithm specified
  // by this message. This key will be used to look up the routing table (i.e., the
  // :ref:`RouteConfiguration`) to use for the request.
  message ScopeKeyBuilder {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder";

    // Specifies the mechanism for constructing key fragments which are composed into scope keys.
    message FragmentBuilder {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder."
          "FragmentBuilder";

      // Specifies how the value of a header should be extracted.
      // The following example maps the structure of a header to the fields in this message.
      //
      // .. code::
      //
      //              <0> <1>   <-- index
      //    X-Header: a=b;c=d
      //    |         || |
      //    |         || \---->
      //    |         ||
      //    |         |\---->
      //    |         |
      //    |         \---->
      //    |
      //    \---->
      //
      // Each 'a=b' key-value pair constitutes an 'element' of the header field.
      message HeaderValueExtractor {
        option (udpa.annotations.versioning).previous_message_type =
            "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder."
            "FragmentBuilder.HeaderValueExtractor";

        // Specifies a header field's key value pair to match on.
        message KvElement {
          option (udpa.annotations.versioning).previous_message_type =
              "envoy.config.filter.network.http_connection_manager.v2.ScopedRoutes.ScopeKeyBuilder."
              "FragmentBuilder.HeaderValueExtractor.KvElement";

          // The separator between key and value (e.g., '=' separates 'k=v;...').
          // If an element is an empty string, the element is ignored.
// If an element contains no separator, the whole element is parsed as key and the
          // fragment value is an empty string.
          // If there are multiple values for a matched key, the first value is returned.
          string separator = 1 [(validate.rules).string = {min_len: 1}];

          // The key to match on.
          string key = 2 [(validate.rules).string = {min_len: 1}];
        }

        // The name of the header field to extract the value from.
        string name = 1 [(validate.rules).string = {min_len: 1}];

        // The element separator (e.g., ';' separates 'a;b;c;d').
        // Default: empty string. This causes the entirety of the header field to be extracted.
        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'
        // must be set to 0.
        string element_separator = 2;

        oneof extract_type {
          // Specifies the zero based index of the element to extract.
          // Note Envoy concatenates multiple values of the same header key into a comma separated
          // string, the splitting always happens after the concatenation.
          uint32 index = 3;

          // Specifies the key value pair to extract the value from.
          KvElement element = 4;
        }
      }

      oneof type {
        option (validate.required) = true;

        // Specifies how a header field's value should be extracted.
        HeaderValueExtractor header_value_extractor = 1;
      }
    }

    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the
    // fragments of a :ref:`ScopedRouteConfiguration`.
    // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key.
    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];
  }

  // The name assigned to the scoped routing configuration.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // The algorithm to use for constructing a scope key for each request.
  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];

  // Configuration source specifier for RDS.
// This config source is used to subscribe to RouteConfiguration resources specified in
  // ScopedRouteConfiguration messages.
  config.core.v3.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}];

  oneof config_specifier {
    option (validate.required) = true;

    // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by
    // matching a key constructed from the request's attributes according to the algorithm specified
    // by the
    // :ref:`ScopeKeyBuilder`
    // in this message.
    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;

    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS
    // API. A scope is assigned to a request by matching a key constructed from the request's
    // attributes according to the algorithm specified by the
    // :ref:`ScopeKeyBuilder`
    // in this message.
    ScopedRds scoped_rds = 5;
  }
}

message ScopedRds {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.http_connection_manager.v2.ScopedRds";

  // Configuration source specifier for scoped RDS.
  config.core.v3.ConfigSource scoped_rds_config_source = 1
      [(validate.rules).message = {required: true}];
}

// [#next-free-field: 6]
message HttpFilter {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.http_connection_manager.v2.HttpFilter";

  reserved 3, 2;

  reserved "config";

  // The name of the filter configuration. The name is used as a fallback to
  // select an extension if the type of the configuration proto is not
  // sufficient. It also serves as a resource name in ExtensionConfigDS.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  oneof config_type {
    // Filter specific configuration which depends on the filter being instantiated. See the supported
    // filters for further documentation.
    google.protobuf.Any typed_config = 4;

    // Configuration source specifier for an extension configuration discovery service.
// In case of a failure and without the default configuration, the HTTP listener responds with code 500.
    // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061).
    config.core.v3.ExtensionConfigSource config_discovery = 5;
  }
}

message RequestIDExtension {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.http_connection_manager.v2.RequestIDExtension";

  // Request ID extension specific configuration.
  google.protobuf.Any typed_config = 1;
}


================================================
FILE: api/envoy/extensions/filters/network/http_connection_manager/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/config/accesslog/v4alpha:pkg",
        "//envoy/config/core/v4alpha:pkg",
        "//envoy/config/route/v4alpha:pkg",
        "//envoy/config/trace/v4alpha:pkg",
        "//envoy/extensions/filters/network/http_connection_manager/v3:pkg",
        "//envoy/type/tracing/v3:pkg",
        "//envoy/type/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
        "@com_github_cncf_udpa//udpa/core/v1:pkg",
    ],
)


================================================
FILE: api/envoy/extensions/filters/network/http_connection_manager/v4alpha/http_connection_manager.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.http_connection_manager.v4alpha;

import "envoy/config/accesslog/v4alpha/accesslog.proto";
import "envoy/config/core/v4alpha/base.proto";
import "envoy/config/core/v4alpha/config_source.proto";
import "envoy/config/core/v4alpha/extension.proto";
import "envoy/config/core/v4alpha/protocol.proto";
import "envoy/config/core/v4alpha/substitution_format_string.proto";
import
"envoy/config/route/v4alpha/route.proto"; import "envoy/config/route/v4alpha/scoped_route.proto"; import "envoy/config/trace/v4alpha/http_tracer.proto"; import "envoy/type/tracing/v3/custom_tag.proto"; import "envoy/type/v3/percent.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/core/v1/resource_locator.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/security.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v4alpha"; option java_outer_classname = "HttpConnectionManagerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: HTTP connection manager] // HTTP connection manager :ref:`configuration overview `. // [#extension: envoy.filters.network.http_connection_manager] // [#next-free-field: 41] message HttpConnectionManager { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"; enum CodecType { // For every new connection, the connection manager will determine which // codec to use. This mode supports both ALPN for TLS listeners as well as // protocol inference for plaintext listeners. If ALPN data is available, it // is preferred, otherwise protocol inference is used. In almost all cases, // this is the right option to choose for this setting. AUTO = 0; // The connection manager will assume that the client is speaking HTTP/1.1. HTTP1 = 1; // The connection manager will assume that the client is speaking HTTP/2 // (Envoy does not require HTTP/2 to take place over TLS or to use ALPN. // Prior knowledge is allowed). 
HTTP2 = 2; // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient // to distinguish HTTP1 and HTTP2 traffic. HTTP3 = 3; } enum ServerHeaderTransformation { // Overwrite any Server header with the contents of server_name. OVERWRITE = 0; // If no Server header is present, append Server server_name // If a Server header is present, pass it through. APPEND_IF_ABSENT = 1; // Pass through the value of the server header, and do not append a header // if none is present. PASS_THROUGH = 2; } // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP // header. enum ForwardClientCertDetails { // Do not send the XFCC header to the next hop. This is the default value. SANITIZE = 0; // When the client connection is mTLS (Mutual TLS), forward the XFCC header // in the request. FORWARD_ONLY = 1; // When the client connection is mTLS, append the client certificate // information to the request’s XFCC header and forward it. APPEND_FORWARD = 2; // When the client connection is mTLS, reset the XFCC header with the client // certificate information and send it to the next hop. SANITIZE_SET = 3; // Always forward the XFCC header in the request, regardless of whether the // client connection is mTLS. ALWAYS_FORWARD_ONLY = 4; } // [#next-free-field: 10] message Tracing { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.Tracing"; enum OperationName { // The HTTP listener is used for ingress/incoming requests. INGRESS = 0; // The HTTP listener is used for egress/outgoing requests. EGRESS = 1; } reserved 1, 2; reserved "operation_name", "request_headers_for_tags"; // Target percentage of requests managed by this HTTP connection manager that will be force // traced if the :ref:`x-client-trace-id ` // header is set. 
This field is a direct analog for the runtime variable // 'tracing.client_sampling' in the :ref:`HTTP Connection Manager // `. // Default: 100% type.v3.Percent client_sampling = 3; // Target percentage of requests managed by this HTTP connection manager that will be randomly // selected for trace generation, if not requested by the client or not forced. This field is // a direct analog for the runtime variable 'tracing.random_sampling' in the // :ref:`HTTP Connection Manager `. // Default: 100% type.v3.Percent random_sampling = 4; // Target percentage of requests managed by this HTTP connection manager that will be traced // after all other sampling checks have been applied (client-directed, force tracing, random // sampling). This field functions as an upper limit on the total configured sampling rate. For // instance, setting client_sampling to 100% but overall_sampling to 1% will result in only 1% // of client requests with the appropriate headers to be force traced. This field is a direct // analog for the runtime variable 'tracing.global_enabled' in the // :ref:`HTTP Connection Manager `. // Default: 100% type.v3.Percent overall_sampling = 5; // Whether to annotate spans with additional data. If true, spans will include logs for stream // events. bool verbose = 6; // Maximum length of the request path to extract and include in the HttpUrl tag. Used to // truncate lengthy request paths to meet the needs of a tracing backend. // Default: 256 google.protobuf.UInt32Value max_path_tag_length = 7; // A list of custom tags with unique tag name to create tags for the active span. repeated type.tracing.v3.CustomTag custom_tags = 8; // Configuration for an external tracing provider. // If not specified, no tracing will be performed. // // .. attention:: // Please be aware that *envoy.tracers.opencensus* provider can only be configured once // in Envoy lifetime. 
// Any attempts to reconfigure it or to use different configurations for different HCM filters // will be rejected. // Such a constraint is inherent to OpenCensus itself. It cannot be overcome without changes // on OpenCensus side. config.trace.v4alpha.Tracing.Http provider = 9; } message InternalAddressConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." "InternalAddressConfig"; // Whether unix socket addresses should be considered internal. bool unix_sockets = 1; } // [#next-free-field: 7] message SetCurrentClientCertDetails { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." "SetCurrentClientCertDetails"; reserved 2; // Whether to forward the subject of the client cert. Defaults to false. google.protobuf.BoolValue subject = 1; // Whether to forward the entire client cert in URL encoded PEM format. This will appear in the // XFCC header comma separated from other values with the value Cert="PEM". // Defaults to false. bool cert = 3; // Whether to forward the entire client cert chain (including the leaf cert) in URL encoded PEM // format. This will appear in the XFCC header comma separated from other values with the value // Chain="PEM". // Defaults to false. bool chain = 6; // Whether to forward the DNS type Subject Alternative Names of the client cert. // Defaults to false. bool dns = 4; // Whether to forward the URI type Subject Alternative Name of the client cert. Defaults to // false. bool uri = 5; } // The configuration for HTTP upgrades. // For each upgrade type desired, an UpgradeConfig must be added. // // .. warning:: // // The current implementation of upgrade headers does not handle // multi-valued upgrade headers. Support for multi-valued headers may be // added in the future if needed. // // .. 
warning:: // The current implementation of upgrade headers does not work with HTTP/2 // upstreams. message UpgradeConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager." "UpgradeConfig"; // The case-insensitive name of this upgrade, e.g. "websocket". // For each upgrade type present in upgrade_configs, requests with // Upgrade: [upgrade_type] // will be proxied upstream. string upgrade_type = 1; // If present, this represents the filter chain which will be created for // this type of upgrade. If no filters are present, the filter chain for // HTTP connections will be used for this upgrade type. repeated HttpFilter filters = 2; // Determines if upgrades are enabled or disabled by default. Defaults to true. // This can be overridden on a per-route basis with :ref:`cluster // ` as documented in the // :ref:`upgrade documentation `. google.protobuf.BoolValue enabled = 3; } reserved 27, 11; reserved "idle_timeout"; // Supplies the type of codec that the connection manager should use. CodecType codec_type = 1 [(validate.rules).enum = {defined_only: true}]; // The human readable prefix to use when emitting statistics for the // connection manager. See the :ref:`statistics documentation ` for // more information. string stat_prefix = 2 [(validate.rules).string = {min_len: 1}]; oneof route_specifier { option (validate.required) = true; // The connection manager’s route table will be dynamically loaded via the RDS API. Rds rds = 3; // The route table for the connection manager is static and is specified in this property. config.route.v4alpha.RouteConfiguration route_config = 4; // A route table will be dynamically assigned to each request based on request attributes // (e.g., the value of a header). The "routing scopes" (i.e., route tables) and "scope keys" are // specified in this message. 
ScopedRoutes scoped_routes = 31; } // A list of individual HTTP filters that make up the filter chain for // requests made to the connection manager. :ref:`Order matters ` // as the filters are processed sequentially as request events happen. repeated HttpFilter http_filters = 5; // Whether the connection manager manipulates the :ref:`config_http_conn_man_headers_user-agent` // and :ref:`config_http_conn_man_headers_downstream-service-cluster` headers. See the linked // documentation for more information. Defaults to false. google.protobuf.BoolValue add_user_agent = 6; // Presence of the object defines whether the connection manager // emits :ref:`tracing ` data to the :ref:`configured tracing provider // `. Tracing tracing = 7; // Additional settings for HTTP requests handled by the connection manager. These will be // applicable to both HTTP1 and HTTP2 requests. config.core.v4alpha.HttpProtocolOptions common_http_protocol_options = 35 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // Additional HTTP/1 settings that are passed to the HTTP/1 codec. config.core.v4alpha.Http1ProtocolOptions http_protocol_options = 8; // Additional HTTP/2 settings that are passed directly to the HTTP/2 codec. config.core.v4alpha.Http2ProtocolOptions http2_protocol_options = 9 [(udpa.annotations.security).configure_for_untrusted_downstream = true]; // An optional override that the connection manager will write to the server // header in responses. If not set, the default is *envoy*. string server_name = 10 [(validate.rules).string = {well_known_regex: HTTP_HEADER_VALUE strict: false}]; // Defines the action to be applied to the Server header on the response path. // By default, Envoy will overwrite the header with the value specified in // server_name. ServerHeaderTransformation server_header_transformation = 34 [(validate.rules).enum = {defined_only: true}]; // The maximum request headers size for incoming connections. 
// If unconfigured, the default max request headers allowed is 60 KiB.
  // Requests that exceed this limit will receive a 431 response.
  // The max configurable limit is 96 KiB, based on current implementation
  // constraints.
  // NOTE(review): several angle-bracketed :ref: targets in the comments below were lost
  // during extraction — restore them from the upstream proto before regenerating docs.
  google.protobuf.UInt32Value max_request_headers_kb = 29
      [(validate.rules).uint32 = {lte: 96 gt: 0}];

  // The stream idle timeout for connections managed by the connection manager.
  // If not specified, this defaults to 5 minutes. The default value was selected
  // so as not to interfere with any smaller configured timeouts that may have
  // existed in configurations prior to the introduction of this feature, while
  // introducing robustness to TCP connections that terminate without a FIN.
  //
  // This idle timeout applies to new streams and is overridable by the
  // :ref:`route-level idle_timeout
  // `. Even on a stream in
  // which the override applies, prior to receipt of the initial request
  // headers, the :ref:`stream_idle_timeout
  // `
  // applies. Each time an encode/decode event for headers or data is processed
  // for the stream, the timer will be reset. If the timeout fires, the stream
  // is terminated with a 408 Request Timeout error code if no upstream response
  // header has been received, otherwise a stream reset occurs.
  //
  // This timeout also specifies the amount of time that Envoy will wait for the peer to open enough
  // window to write any remaining stream data once the entirety of stream data (local end stream is
  // true) has been buffered pending available window. In other words, this timeout defends against
  // a peer that does not release enough window to completely write the stream, even though all
  // data has been proxied within available flow control windows. If the timeout is hit in this
  // case, the :ref:`tx_flush_timeout ` counter will be
  // incremented. Note that :ref:`max_stream_duration
  // ` does not apply to
  // this corner case.
  //
  // Note that it is possible to idle timeout even if the wire traffic for a stream is non-idle, due
  // to the granularity of events presented to the connection manager. For example, while receiving
  // very large request headers, it may be the case that there is traffic regularly arriving on the
  // wire while the connection manager is only able to observe the end-of-headers event, hence the
  // stream may still idle timeout.
  //
  // A value of 0 will completely disable the connection manager stream idle
  // timeout, although per-route idle timeout overrides will continue to apply.
  google.protobuf.Duration stream_idle_timeout = 24
      [(udpa.annotations.security).configure_for_untrusted_downstream = true];

  // The amount of time that Envoy will wait for the entire request to be received.
  // The timer is activated when the request is initiated, and is disarmed when the last byte of the
  // request is sent upstream (i.e. all decoding filters have processed the request), OR when the
  // response is initiated. If not specified or set to 0, this timeout is disabled.
  google.protobuf.Duration request_timeout = 28
      [(udpa.annotations.security).configure_for_untrusted_downstream = true];

  // The time that Envoy will wait between sending an HTTP/2 “shutdown
  // notification” (GOAWAY frame with max stream ID) and a final GOAWAY frame.
  // This is used so that Envoy provides a grace period for new streams that
  // race with the final GOAWAY frame. During this grace period, Envoy will
  // continue to accept new streams. After the grace period, a final GOAWAY
  // frame is sent and Envoy will start refusing new streams. Draining occurs
  // both when a connection hits the idle timeout or during general server
  // draining. The default grace period is 5000 milliseconds (5 seconds) if this
  // option is not specified.
  google.protobuf.Duration drain_timeout = 12;

  // The delayed close timeout is for downstream connections managed by the HTTP connection manager.
  // It is defined as a grace period after connection close processing has been locally initiated
  // during which Envoy will wait for the peer to close (i.e., a TCP FIN/RST is received by Envoy
  // from the downstream connection) prior to Envoy closing the socket associated with that
  // connection.
  // NOTE: This timeout is enforced even when the socket associated with the downstream connection
  // is pending a flush of the write buffer. However, any progress made writing data to the socket
  // will restart the timer associated with this timeout. This means that the total grace period for
  // a socket in this state will be
  // +.
  //
  // Delaying Envoy's connection close and giving the peer the opportunity to initiate the close
  // sequence mitigates a race condition that exists when downstream clients do not drain/process
  // data in a connection's receive buffer after a remote close has been detected via a socket
  // write(). This race leads to such clients failing to process the response code sent by Envoy,
  // which could result in erroneous downstream processing.
  //
  // If the timeout triggers, Envoy will close the connection's socket.
  //
  // The default timeout is 1000 ms if this option is not specified.
  //
  // .. NOTE::
  //    To be useful in avoiding the race condition described above, this timeout must be set
  //    to *at least* +<100ms to account for
  //    a reasonable "worst" case processing time for a full iteration of Envoy's event loop>.
  //
  // .. WARNING::
  //    A value of 0 will completely disable delayed close processing. When disabled, the downstream
  //    connection's socket will be closed immediately after the write flush is completed or will
  //    never close if the write flush does not complete.
  google.protobuf.Duration delayed_close_timeout = 26;

  // Configuration for :ref:`HTTP access logs `
  // emitted by the connection manager.
repeated config.accesslog.v4alpha.AccessLog access_log = 13;

  // If set to true, the connection manager will use the real remote address
  // of the client connection when determining internal versus external origin and manipulating
  // various headers. If set to false or absent, the connection manager will use the
  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. See the documentation for
  // :ref:`config_http_conn_man_headers_x-forwarded-for`,
  // :ref:`config_http_conn_man_headers_x-envoy-internal`, and
  // :ref:`config_http_conn_man_headers_x-envoy-external-address` for more information.
  google.protobuf.BoolValue use_remote_address = 14
      [(udpa.annotations.security).configure_for_untrusted_downstream = true];

  // The number of additional ingress proxy hops from the right side of the
  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header to trust when
  // determining the origin client's IP address. The default is zero if this option
  // is not specified. See the documentation for
  // :ref:`config_http_conn_man_headers_x-forwarded-for` for more information.
  uint32 xff_num_trusted_hops = 19;

  // Configures what network addresses are considered internal for stats and header sanitation
  // purposes. If unspecified, only RFC1918 IP addresses will be considered internal.
  // See the documentation for :ref:`config_http_conn_man_headers_x-envoy-internal` for more
  // information about internal/external addresses.
  InternalAddressConfig internal_address_config = 25;

  // If set, Envoy will not append the remote address to the
  // :ref:`config_http_conn_man_headers_x-forwarded-for` HTTP header. This may be used in
  // conjunction with HTTP filters that explicitly manipulate XFF after the HTTP connection manager
  // has mutated the request headers. While :ref:`use_remote_address
  // `
  // will also suppress XFF addition, it has consequences for logging and other
  // Envoy uses of the remote address, so *skip_xff_append* should be used
  // when only an elision of XFF addition is intended.
  bool skip_xff_append = 21;

  // Via header value to append to request and response headers. If this is
  // empty, no via header will be appended.
  string via = 22;

  // Whether the connection manager will generate the :ref:`x-request-id
  // ` header if it does not exist. This defaults to
  // true. Generating a random UUID4 is expensive so in high throughput scenarios where this feature
  // is not desired it can be disabled.
  google.protobuf.BoolValue generate_request_id = 15;

  // Whether the connection manager will keep the :ref:`x-request-id
  // ` header if passed for a request that is edge
  // (Edge request is the request from external clients to front Envoy) and not reset it, which
  // is the current Envoy behaviour. This defaults to false.
  bool preserve_external_request_id = 32;

  // If set, Envoy will always set :ref:`x-request-id ` header in response.
  // If this is false or not set, the request ID is returned in responses only if tracing is forced using
  // :ref:`x-envoy-force-trace ` header.
  bool always_set_request_id_in_response = 37;

  // How to handle the :ref:`config_http_conn_man_headers_x-forwarded-client-cert` (XFCC) HTTP
  // header.
  ForwardClientCertDetails forward_client_cert_details = 16
      [(validate.rules).enum = {defined_only: true}];

  // This field is valid only when :ref:`forward_client_cert_details
  // `
  // is APPEND_FORWARD or SANITIZE_SET and the client connection is mTLS. It specifies the fields in
  // the client certificate to be forwarded. Note that in the
  // :ref:`config_http_conn_man_headers_x-forwarded-client-cert` header, *Hash* is always set, and
  // *By* is always set when the client certificate presents the URI type Subject Alternative Name
  // value.
  SetCurrentClientCertDetails set_current_client_cert_details = 17;

  // If proxy_100_continue is true, Envoy will proxy incoming "Expect:
  // 100-continue" headers upstream, and forward "100 Continue" responses
  // downstream. If this is false or not set, Envoy will instead strip the
  // "Expect: 100-continue" header, and send a "100 Continue" response itself.
  bool proxy_100_continue = 18;

  // If
  // :ref:`use_remote_address
  // `
  // is true and represent_ipv4_remote_address_as_ipv4_mapped_ipv6 is true and the remote address is
  // an IPv4 address, the address will be mapped to IPv6 before it is appended to *x-forwarded-for*.
  // This is useful for testing compatibility of upstream services that parse the header value. For
  // example, 50.0.0.1 is represented as ::FFFF:50.0.0.1. See `IPv4-Mapped IPv6 Addresses
  // `_ for details. This will also affect the
  // :ref:`config_http_conn_man_headers_x-envoy-external-address` header. See
  // :ref:`http_connection_manager.represent_ipv4_remote_address_as_ipv4_mapped_ipv6
  // ` for runtime
  // control.
  // [#not-implemented-hide:]
  bool represent_ipv4_remote_address_as_ipv4_mapped_ipv6 = 20;

  repeated UpgradeConfig upgrade_configs = 23;

  // Should paths be normalized according to RFC 3986 before any processing of
  // requests by HTTP filters or routing? This affects the upstream *:path* header
  // as well. For paths that fail this check, Envoy will respond with 400 to
  // paths that are malformed. This defaults to false currently but will default
  // true in the future. When not specified, this value may be overridden by the
  // runtime variable
  // :ref:`http_connection_manager.normalize_path`.
  // See `Normalization and Comparison `_
  // for details of normalization.
  // Note that Envoy does not perform
  // `case normalization `_
  google.protobuf.BoolValue normalize_path = 30;

  // Determines if adjacent slashes in the path are merged into one before any processing of
  // requests by HTTP filters or routing. This affects the upstream *:path* header as well. Without
  // setting this option, incoming requests with path `//dir///file` will not match against route
  // with `prefix` match set to `/dir`. Defaults to `false`. Note that slash merging is not part of
  // `HTTP spec `_ and is provided for convenience.
  bool merge_slashes = 33;

  // The configuration of the request ID extension. This includes operations such as
  // generation, validation, and associated tracing operations.
  //
  // If not set, Envoy uses the default UUID-based behavior:
  //
  // 1. Request ID is propagated using *x-request-id* header.
  //
  // 2. Request ID is a universally unique identifier (UUID).
  //
  // 3. Tracing decision (sampled, forced, etc) is set in 14th byte of the UUID.
  RequestIDExtension request_id_extension = 36;

  // The configuration to customize local reply returned by Envoy. It can customize status code,
  // body text and response content type. If not specified, status code and text body are hard
  // coded in Envoy, the response content type is plain text.
  LocalReplyConfig local_reply_config = 38;

  // Determines if the port part should be removed from host/authority header before any processing
  // of request by HTTP filters or routing. The port would be removed only if it is equal to the :ref:`listener's`
  // local port and request method is not CONNECT. This affects the upstream host header as well.
  // Without setting this option, incoming requests with host `example:443` will not match against
  // route with :ref:`domains` match set to `example`. Defaults to `false`. Note that port removal is not part
  // of `HTTP spec `_ and is provided for convenience.
  bool strip_matching_host_port = 39;

  // Governs Envoy's behavior when receiving invalid HTTP from downstream.
  // If this option is false (default), Envoy will err on the conservative side handling HTTP
  // errors, terminating both HTTP/1.1 and HTTP/2 connections when receiving an invalid request.
// If this option is set to true, Envoy will be more permissive, only resetting the invalid
  // stream in the case of HTTP/2 and leaving the connection open where possible (if the entire
  // request is read for HTTP/1.1)
  // In general this should be true for deployments receiving trusted traffic (L2 Envoys,
  // company-internal mesh) and false when receiving untrusted traffic (edge deployments).
  //
  // If different behaviors for invalid_http_message for HTTP/1 and HTTP/2 are
  // desired, one should use the new HTTP/1 option :ref:`override_stream_error_on_invalid_http_message
  // ` or the new HTTP/2 option
  // :ref:`override_stream_error_on_invalid_http_message
  // `
  // *not* the deprecated but similarly named :ref:`stream_error_on_invalid_http_messaging
  // `
  google.protobuf.BoolValue stream_error_on_invalid_http_message = 40;
}

// The configuration to customize local reply returned by Envoy.
message LocalReplyConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.http_connection_manager.v3.LocalReplyConfig";

  // Configuration of list of mappers which allows to filter and change local response.
  // The mappers will be checked by the specified order until one is matched.
  repeated ResponseMapper mappers = 1;

  // The configuration to form response body from the :ref:`command operators `
  // and to specify response content type as one of: plain/text or application/json.
  //
  // Example one: "plain/text" ``body_format``.
  //
  // .. validated-code-block:: yaml
  //   :type-name: envoy.config.core.v3.SubstitutionFormatString
  //
  //   text_format: "%LOCAL_REPLY_BODY%:%RESPONSE_CODE%:path=%REQ(:path)%\n"
  //
  // The following response body in "plain/text" format will be generated for a request with
  // local reply body of "upstream connection error", response_code=503 and path=/foo.
  //
  // .. code-block:: text
  //
  //   upstream connect error:503:path=/foo
  //
  // Example two: "application/json" ``body_format``.
  //
  // .. validated-code-block:: yaml
  //   :type-name: envoy.config.core.v3.SubstitutionFormatString
  //
  //   json_format:
  //     status: "%RESPONSE_CODE%"
  //     message: "%LOCAL_REPLY_BODY%"
  //     path: "%REQ(:path)%"
  //
  // The following response body in "application/json" format would be generated for a request with
  // local reply body of "upstream connection error", response_code=503 and path=/foo.
  //
  // .. code-block:: json
  //
  //   {
  //     "status": 503,
  //     "message": "upstream connection error",
  //     "path": "/foo"
  //   }
  //
  config.core.v4alpha.SubstitutionFormatString body_format = 2;
}

// The configuration to filter and change local response.
// [#next-free-field: 6]
message ResponseMapper {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.http_connection_manager.v3.ResponseMapper";

  // Filter to determine if this mapper should apply.
  config.accesslog.v4alpha.AccessLogFilter filter = 1 [(validate.rules).message = {required: true}];

  // The new response status code if specified.
  google.protobuf.UInt32Value status_code = 2 [(validate.rules).uint32 = {lt: 600 gte: 200}];

  // The new local reply body text if specified. It will be used in the `%LOCAL_REPLY_BODY%`
  // command operator in the `body_format`.
  config.core.v4alpha.DataSource body = 3;

  // A per mapper `body_format` to override the :ref:`body_format `.
  // It will be used when this mapper is matched.
  config.core.v4alpha.SubstitutionFormatString body_format_override = 4;

  // HTTP headers to add to a local reply. This allows the response mapper to append, to add
  // or to override headers of any local reply before it is sent to a downstream client.
  repeated config.core.v4alpha.HeaderValueOption headers_to_add = 5
      [(validate.rules).repeated = {max_items: 1000}];
}

message Rds {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.http_connection_manager.v3.Rds";

  // Configuration source specifier for RDS.
config.core.v4alpha.ConfigSource config_source = 1 [(validate.rules).message = {required: true}]; oneof name_specifier { // The name of the route configuration. This name will be passed to the RDS // API. This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. string route_config_name = 2; // Resource locator for RDS. This is mutually exclusive to *route_config_name*. // [#not-implemented-hide:] udpa.core.v1.ResourceLocator rds_resource_locator = 3; } } // This message is used to work around the limitations with 'oneof' and repeated fields. message ScopedRouteConfigurationsList { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRouteConfigurationsList"; repeated config.route.v4alpha.ScopedRouteConfiguration scoped_route_configurations = 1 [(validate.rules).repeated = {min_items: 1}]; } // [#next-free-field: 6] message ScopedRoutes { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes"; // Specifies the mechanism for constructing "scope keys" based on HTTP request attributes. These // keys are matched against a set of :ref:`Key` // objects assembled from :ref:`ScopedRouteConfiguration` // messages distributed via SRDS (the Scoped Route Discovery Service) or assigned statically via // :ref:`scoped_route_configurations_list`. // // Upon receiving a request's headers, the Router will build a key using the algorithm specified // by this message. This key will be used to look up the routing table (i.e., the // :ref:`RouteConfiguration`) to use for the request. 
message ScopeKeyBuilder {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes.ScopeKeyBuilder";

    // Specifies the mechanism for constructing key fragments which are composed into scope keys.
    message FragmentBuilder {
      option (udpa.annotations.versioning).previous_message_type =
          "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes."
          "ScopeKeyBuilder.FragmentBuilder";

      // Specifies how the value of a header should be extracted.
      // The following example maps the structure of a header to the fields in this message.
      //
      // NOTE(review): the labels on the diagram arrows below were lost during extraction —
      // restore them from the upstream proto.
      //
      // .. code::
      //
      //              <0> <1>   <-- index
      //    X-Header: a=b;c=d
      //    |         || |
      //    |         || \---->
      //    |         ||
      //    |         |\---->
      //    |         |
      //    |         \---->
      //    |
      //    \---->
      //
      // Each 'a=b' key-value pair constitutes an 'element' of the header field.
      message HeaderValueExtractor {
        option (udpa.annotations.versioning).previous_message_type =
            "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes."
            "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor";

        // Specifies a header field's key value pair to match on.
        message KvElement {
          option (udpa.annotations.versioning).previous_message_type =
              "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRoutes."
              "ScopeKeyBuilder.FragmentBuilder.HeaderValueExtractor.KvElement";

          // The separator between key and value (e.g., '=' separates 'k=v;...').
          // If an element is an empty string, the element is ignored.
          // If an element contains no separator, the whole element is parsed as key and the
          // fragment value is an empty string.
          // If there are multiple values for a matched key, the first value is returned.
          string separator = 1 [(validate.rules).string = {min_len: 1}];

          // The key to match on.
          string key = 2 [(validate.rules).string = {min_len: 1}];
        }

        // The name of the header field to extract the value from.
        string name = 1 [(validate.rules).string = {min_len: 1}];

        // The element separator (e.g., ';' separates 'a;b;c;d').
        // Default: empty string. This causes the entirety of the header field to be extracted.
        // If this field is set to an empty string and 'index' is used in the oneof below, 'index'
        // must be set to 0.
        string element_separator = 2;

        oneof extract_type {
          // Specifies the zero based index of the element to extract.
          // Note Envoy concatenates multiple values of the same header key into a comma separated
          // string, the splitting always happens after the concatenation.
          uint32 index = 3;

          // Specifies the key value pair to extract the value from.
          KvElement element = 4;
        }
      }

      oneof type {
        option (validate.required) = true;

        // Specifies how a header field's value should be extracted.
        HeaderValueExtractor header_value_extractor = 1;
      }
    }

    // The final(built) scope key consists of the ordered union of these fragments, which are compared in order with the
    // fragments of a :ref:`ScopedRouteConfiguration`.
    // A missing fragment during comparison will make the key invalid, i.e., the computed key doesn't match any key.
    repeated FragmentBuilder fragments = 1 [(validate.rules).repeated = {min_items: 1}];
  }

  // The name assigned to the scoped routing configuration.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // The algorithm to use for constructing a scope key for each request.
  ScopeKeyBuilder scope_key_builder = 2 [(validate.rules).message = {required: true}];

  // Configuration source specifier for RDS.
  // This config source is used to subscribe to RouteConfiguration resources specified in
  // ScopedRouteConfiguration messages.
  config.core.v4alpha.ConfigSource rds_config_source = 3 [(validate.rules).message = {required: true}];

  oneof config_specifier {
    option (validate.required) = true;

    // The set of routing scopes corresponding to the HCM. A scope is assigned to a request by
    // matching a key constructed from the request's attributes according to the algorithm specified
    // by the
    // :ref:`ScopeKeyBuilder`
    // in this message.
    ScopedRouteConfigurationsList scoped_route_configurations_list = 4;

    // The set of routing scopes associated with the HCM will be dynamically loaded via the SRDS
    // API. A scope is assigned to a request by matching a key constructed from the request's
    // attributes according to the algorithm specified by the
    // :ref:`ScopeKeyBuilder`
    // in this message.
    ScopedRds scoped_rds = 5;
  }
}

message ScopedRds {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.http_connection_manager.v3.ScopedRds";

  // Configuration source specifier for scoped RDS.
  config.core.v4alpha.ConfigSource scoped_rds_config_source = 1
      [(validate.rules).message = {required: true}];
}

// [#next-free-field: 6]
message HttpFilter {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter";

  reserved 3, 2;

  reserved "config";

  // The name of the filter configuration. The name is used as a fallback to
  // select an extension if the type of the configuration proto is not
  // sufficient. It also serves as a resource name in ExtensionConfigDS.
  string name = 1 [(validate.rules).string = {min_len: 1}];

  oneof config_type {
    // Filter specific configuration which depends on the filter being instantiated. See the supported
    // filters for further documentation.
    google.protobuf.Any typed_config = 4;

    // Configuration source specifier for an extension configuration discovery service.
    // In case of a failure and without the default configuration, the HTTP listener responds with code 500.
    // Extension configs delivered through this mechanism are not expected to require warming (see https://github.com/envoyproxy/envoy/issues/12061).
config.core.v4alpha.ExtensionConfigSource config_discovery = 5; } } message RequestIDExtension { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.http_connection_manager.v3.RequestIDExtension"; // Request ID extension specific configuration. google.protobuf.Any typed_config = 1; } ================================================ FILE: api/envoy/extensions/filters/network/kafka_broker/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/network/kafka_broker/v2alpha1:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/kafka_broker/v3/kafka_broker.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.kafka_broker.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.kafka_broker.v3"; option java_outer_classname = "KafkaBrokerProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Kafka Broker] // Kafka Broker :ref:`configuration overview `. // [#extension: envoy.filters.network.kafka_broker] message KafkaBroker { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.kafka_broker.v2alpha1.KafkaBroker"; // The prefix to use when emitting :ref:`statistics `. 
string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; } ================================================ FILE: api/envoy/extensions/filters/network/local_ratelimit/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/local_rate_limit/v2alpha:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/local_ratelimit/v3/local_rate_limit.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.local_ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/token_bucket.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.local_ratelimit.v3"; option java_outer_classname = "LocalRateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Local rate limit] // Local rate limit :ref:`configuration overview `. // [#extension: envoy.filters.network.local_ratelimit] message LocalRateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.local_rate_limit.v2alpha.LocalRateLimit"; // The prefix to use when emitting :ref:`statistics // `. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The token bucket configuration to use for rate limiting connections that are processed by the // filter's filter chain. Each incoming connection processed by the filter consumes a single // token. 
If the token is available, the connection will be allowed. If no tokens are available, // the connection will be immediately closed. // // .. note:: // In the current implementation each filter and filter chain has an independent rate limit. // // .. note:: // In the current implementation the token bucket's :ref:`fill_interval // ` must be >= 50ms to avoid too aggressive // refills. type.v3.TokenBucket token_bucket = 2 [(validate.rules).message = {required: true}]; // Runtime flag that controls whether the filter is enabled or not. If not specified, defaults // to enabled. config.core.v3.RuntimeFeatureFlag runtime_enabled = 3; } ================================================ FILE: api/envoy/extensions/filters/network/mongo_proxy/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/network/mongo_proxy/v2:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/mongo_proxy/v3/mongo_proxy.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.mongo_proxy.v3; import "envoy/extensions/filters/common/fault/v3/fault.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.mongo_proxy.v3"; option java_outer_classname = "MongoProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Mongo proxy] // MongoDB :ref:`configuration overview `. 
// [#extension: envoy.filters.network.mongo_proxy]

message MongoProxy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.mongo_proxy.v2.MongoProxy";

  // The human readable prefix to use when emitting :ref:`statistics
  // `.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // The optional path to use for writing Mongo access logs. If no access log
  // path is specified no access logs will be written. Note that access log is
  // also gated :ref:`runtime `.
  string access_log = 2;

  // Inject a fixed delay before proxying a Mongo operation. Delays are
  // applied to the following MongoDB operations: Query, Insert, GetMore,
  // and KillCursors. Once an active delay is in progress, all incoming
  // data up until the timer event fires will be a part of the delay.
  common.fault.v3.FaultDelay delay = 3;

  // Flag to specify whether :ref:`dynamic metadata
  // ` should be emitted. Defaults to false.
  bool emit_dynamic_metadata = 4;
}


================================================
FILE: api/envoy/extensions/filters/network/mysql_proxy/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)


================================================
FILE: api/envoy/extensions/filters/network/mysql_proxy/v3/mysql_proxy.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.mysql_proxy.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.mysql_proxy.v3";
option java_outer_classname = "MysqlProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: MySQL proxy]
// MySQL Proxy :ref:`configuration overview `.
// [#extension: envoy.filters.network.mysql_proxy]

message MySQLProxy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.mysql_proxy.v1alpha1.MySQLProxy";

  // The human readable prefix to use when emitting :ref:`statistics
  // `.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // [#not-implemented-hide:] The optional path to use for writing MySQL access logs.
  // If the access log field is empty, access logs will not be written.
  string access_log = 2;
}


================================================
FILE: api/envoy/extensions/filters/network/postgres_proxy/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)


================================================
FILE: api/envoy/extensions/filters/network/postgres_proxy/v3alpha/postgres_proxy.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.postgres_proxy.v3alpha;

import "google/protobuf/wrappers.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.postgres_proxy.v3alpha";
option java_outer_classname = "PostgresProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Postgres proxy]
// Postgres Proxy :ref:`configuration overview
// `.
// [#extension: envoy.filters.network.postgres_proxy]

message PostgresProxy {
  // The human readable prefix to use when emitting :ref:`statistics
  // `.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // Controls whether SQL statements received in Frontend Query messages
  // are parsed. Parsing is required to produce Postgres proxy filter
  // metadata. Defaults to true.
  google.protobuf.BoolValue enable_sql_parsing = 2;
}


================================================
FILE: api/envoy/extensions/filters/network/ratelimit/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/network/rate_limit/v2:pkg",
        "//envoy/config/ratelimit/v3:pkg",
        "//envoy/extensions/common/ratelimit/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)


================================================
FILE: api/envoy/extensions/filters/network/ratelimit/v3/rate_limit.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.ratelimit.v3;

import "envoy/config/ratelimit/v3/rls.proto";
import "envoy/extensions/common/ratelimit/v3/ratelimit.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.ratelimit.v3";
option java_outer_classname = "RateLimitProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Rate limit]
// Rate limit :ref:`configuration overview `.
// [#extension: envoy.filters.network.ratelimit]

// [#next-free-field: 7]
message RateLimit {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.rate_limit.v2.RateLimit";

  // The prefix to use when emitting :ref:`statistics `.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // The rate limit domain to use in the rate limit service request.
  string domain = 2 [(validate.rules).string = {min_len: 1}];

  // The rate limit descriptor list to use in the rate limit service request.
  repeated common.ratelimit.v3.RateLimitDescriptor descriptors = 3
      [(validate.rules).repeated = {min_items: 1}];

  // The timeout in milliseconds for the rate limit service RPC. If not
  // set, this defaults to 20ms.
google.protobuf.Duration timeout = 4; // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. // Defaults to false. bool failure_mode_deny = 5; // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 6 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/extensions/filters/network/rbac/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/network/rbac/v2:pkg", "//envoy/config/rbac/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/rbac/v3/rbac.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.rbac.v3; import "envoy/config/rbac/v3/rbac.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v3"; option java_outer_classname = "RbacProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RBAC] // Role-Based Access Control :ref:`configuration overview `. // [#extension: envoy.filters.network.rbac] // RBAC network filter config. // // Header should not be used in rules/shadow_rules in RBAC network filter as // this information is only available in :ref:`RBAC http filter `. 
message RBAC {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.rbac.v2.RBAC";

  enum EnforcementType {
    // Apply RBAC policies when the first byte of data arrives on the connection.
    ONE_TIME_ON_FIRST_BYTE = 0;

    // Continuously apply RBAC policies as data arrives. Use this mode when
    // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka,
    // etc. when the protocol decoders emit dynamic metadata such as the
    // resources being accessed and the operations on the resources.
    CONTINUOUS = 1;
  }

  // Specify the RBAC rules to be applied globally.
  // If absent, no enforcing RBAC policy will be applied.
  config.rbac.v3.RBAC rules = 1;

  // Shadow rules are not enforced by the filter but will emit stats and logs
  // and can be used for rule testing.
  // If absent, no shadow RBAC policy will be applied.
  config.rbac.v3.RBAC shadow_rules = 2;

  // The prefix to use when emitting statistics.
  string stat_prefix = 3 [(validate.rules).string = {min_len: 1}];

  // RBAC enforcement strategy. By default RBAC will be enforced only once
  // when the first byte of data arrives from the downstream. When used in
  // conjunction with filters that emit dynamic metadata after decoding
  // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to
  // CONTINUOUS to enforce RBAC policies on every message boundary.
  EnforcementType enforcement_type = 4;
}

================================================
FILE: api/envoy/extensions/filters/network/rbac/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/rbac/v4alpha:pkg",
        "//envoy/extensions/filters/network/rbac/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/network/rbac/v4alpha/rbac.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.rbac.v4alpha;

import "envoy/config/rbac/v4alpha/rbac.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.rbac.v4alpha";
option java_outer_classname = "RbacProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: RBAC]
// Role-Based Access Control :ref:`configuration overview `.
// [#extension: envoy.filters.network.rbac]

// RBAC network filter config.
//
// Header should not be used in rules/shadow_rules in RBAC network filter as
// this information is only available in :ref:`RBAC http filter `.
message RBAC {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.rbac.v3.RBAC";

  enum EnforcementType {
    // Apply RBAC policies when the first byte of data arrives on the connection.
    ONE_TIME_ON_FIRST_BYTE = 0;

    // Continuously apply RBAC policies as data arrives. Use this mode when
    // using RBAC with message oriented protocols such as Mongo, MySQL, Kafka,
    // etc. when the protocol decoders emit dynamic metadata such as the
    // resources being accessed and the operations on the resources.
    CONTINUOUS = 1;
  }

  // Specify the RBAC rules to be applied globally.
  // If absent, no enforcing RBAC policy will be applied.
config.rbac.v4alpha.RBAC rules = 1; // Shadow rules are not enforced by the filter but will emit stats and logs // and can be used for rule testing. // If absent, no shadow RBAC policy will be applied. config.rbac.v4alpha.RBAC shadow_rules = 2; // The prefix to use when emitting statistics. string stat_prefix = 3 [(validate.rules).string = {min_len: 1}]; // RBAC enforcement strategy. By default RBAC will be enforced only once // when the first byte of data arrives from the downstream. When used in // conjunction with filters that emit dynamic metadata after decoding // every payload (e.g., Mongo, MySQL, Kafka) set the enforcement type to // CONTINUOUS to enforce RBAC policies on every message boundary. EnforcementType enforcement_type = 4; } ================================================ FILE: api/envoy/extensions/filters/network/redis_proxy/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/redis_proxy/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/redis_proxy/v3/redis_proxy.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.redis_proxy.v3; import "envoy/config/core/v3/base.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.redis_proxy.v3"; option 
java_outer_classname = "RedisProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Redis Proxy]
// Redis Proxy :ref:`configuration overview `.
// [#extension: envoy.filters.network.redis_proxy]

// [#next-free-field: 9]
message RedisProxy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.redis_proxy.v2.RedisProxy";

  // Redis connection pool settings.
  // [#next-free-field: 9]
  message ConnPoolSettings {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.config.filter.network.redis_proxy.v2.RedisProxy.ConnPoolSettings";

    // ReadPolicy controls how Envoy routes read commands to Redis nodes. This is currently
    // supported for Redis Cluster. All ReadPolicy settings except MASTER may return stale data
    // because replication is asynchronous and requires some delay. You need to ensure that your
    // application can tolerate stale data.
    enum ReadPolicy {
      // Default mode. Read from the current primary node.
      MASTER = 0 [(udpa.annotations.enum_value_migrate).rename = "PRIMARY"];

      // Read from the primary, but if it is unavailable, read from replica nodes.
      PREFER_MASTER = 1 [(udpa.annotations.enum_value_migrate).rename = "PREFER_PRIMARY"];

      // Read from replica nodes. If multiple replica nodes are present within a shard, a random
      // node is selected. Healthy nodes have precedence over unhealthy nodes.
      REPLICA = 2;

      // Read from the replica nodes (similar to REPLICA), but if all replicas are unavailable (not
      // present or unhealthy), read from the primary.
      PREFER_REPLICA = 3;

      // Read from any node of the cluster. A random node is selected among the primary and
      // replicas, healthy nodes have precedence over unhealthy nodes.
      ANY = 4;
    }

    // Per-operation timeout in milliseconds. The timer starts when the first
    // command of a pipeline is written to the backend connection.
Each response received from Redis // resets the timer since it signifies that the next command is being processed by the backend. // The only exception to this behavior is when a connection to a backend is not yet established. // In that case, the connect timeout on the cluster will govern the timeout until the connection // is ready. google.protobuf.Duration op_timeout = 1 [(validate.rules).duration = {required: true}]; // Use hash tagging on every redis key to guarantee that keys with the same hash tag will be // forwarded to the same upstream. The hash key used for determining the upstream in a // consistent hash ring configuration will be computed from the hash tagged key instead of the // whole key. The algorithm used to compute the hash tag is identical to the `redis-cluster // implementation `_. // // Examples: // // * '{user1000}.following' and '{user1000}.followers' **will** be sent to the same upstream // * '{user1000}.following' and '{user1001}.following' **might** be sent to the same upstream bool enable_hashtagging = 2; // Accept `moved and ask redirection // `_ errors from upstream // redis servers, and retry commands to the specified target server. The target server does not // need to be known to the cluster manager. If the command cannot be redirected, then the // original error is passed downstream unchanged. By default, this support is not enabled. bool enable_redirection = 3; // Maximum size of encoded request buffer before flush is triggered and encoded requests // are sent upstream. If this is unset, the buffer flushes whenever it receives data // and performs no batching. // This feature makes it possible for multiple clients to send requests to Envoy and have // them batched- for example if one is running several worker processes, each with its own // Redis connection. There is no benefit to using this with a single downstream process. // Recommended size (if enabled) is 1024 bytes. 
uint32 max_buffer_size_before_flush = 4; // The encoded request buffer is flushed N milliseconds after the first request has been // encoded, unless the buffer size has already exceeded `max_buffer_size_before_flush`. // If `max_buffer_size_before_flush` is not set, this flush timer is not used. Otherwise, // the timer should be set according to the number of clients, overall request rate and // desired maximum latency for a single command. For example, if there are many requests // being batched together at a high rate, the buffer will likely be filled before the timer // fires. Alternatively, if the request rate is lower the buffer will not be filled as often // before the timer fires. // If `max_buffer_size_before_flush` is set, but `buffer_flush_timeout` is not, the latter // defaults to 3ms. google.protobuf.Duration buffer_flush_timeout = 5; // `max_upstream_unknown_connections` controls how many upstream connections to unknown hosts // can be created at any given time by any given worker thread (see `enable_redirection` for // more details). If the host is unknown and a connection cannot be created due to enforcing // this limit, then redirection will fail and the original redirection error will be passed // downstream unchanged. This limit defaults to 100. google.protobuf.UInt32Value max_upstream_unknown_connections = 6; // Enable per-command statistics per upstream cluster, in addition to the filter level aggregate // count. These commands are measured in microseconds. bool enable_command_stats = 8; // Read policy. The default is to read from the primary. 
ReadPolicy read_policy = 7 [(validate.rules).enum = {defined_only: true}]; } message PrefixRoutes { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes"; message Route { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route"; // The router is capable of shadowing traffic from one cluster to another. The current // implementation is "fire and forget," meaning Envoy will not wait for the shadow cluster to // respond before returning the response from the primary cluster. All normal statistics are // collected for the shadow cluster making this feature useful for testing. message RequestMirrorPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProxy.PrefixRoutes.Route." "RequestMirrorPolicy"; // Specifies the cluster that requests will be mirrored to. The cluster must // exist in the cluster manager configuration. string cluster = 1 [(validate.rules).string = {min_len: 1}]; // If not specified or the runtime key is not present, all requests to the target cluster // will be mirrored. // // If specified, Envoy will lookup the runtime key to get the percentage of requests to the // mirror. config.core.v3.RuntimeFractionalPercent runtime_fraction = 2; // Set this to TRUE to only mirror write commands, this is effectively replicating the // writes in a "fire and forget" manner. bool exclude_read_commands = 3; } // String prefix that must match the beginning of the keys. Envoy will always favor the // longest match. string prefix = 1 [(validate.rules).string = {max_bytes: 1000}]; // Indicates if the prefix needs to be removed from the key when forwarded. bool remove_prefix = 2; // Upstream cluster to forward the command to. string cluster = 3 [(validate.rules).string = {min_len: 1}]; // Indicates that the route has a request mirroring policy. 
repeated RequestMirrorPolicy request_mirror_policy = 4; } reserved 3; reserved "catch_all_cluster"; // List of prefix routes. repeated Route routes = 1; // Indicates that prefix matching should be case insensitive. bool case_insensitive = 2; // Optional catch-all route to forward commands that doesn't match any of the routes. The // catch-all route becomes required when no routes are specified. Route catch_all_route = 4; } // RedisFault defines faults used for fault injection. message RedisFault { enum RedisFaultType { // Delays requests. This is the base fault; other faults can have delays added. DELAY = 0; // Returns errors on requests. ERROR = 1; } // Fault type. RedisFaultType fault_type = 1 [(validate.rules).enum = {defined_only: true}]; // Percentage of requests fault applies to. config.core.v3.RuntimeFractionalPercent fault_enabled = 2 [(validate.rules).message = {required: true}]; // Delay for all faults. If not set, defaults to zero google.protobuf.Duration delay = 3; // Commands fault is restricted to, if any. If not set, fault applies to all commands // other than auth and ping (due to special handling of those commands in Envoy). repeated string commands = 4; } reserved 2; reserved "cluster"; // The prefix to use when emitting :ref:`statistics `. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // Network settings for the connection pool to the upstream clusters. ConnPoolSettings settings = 3 [(validate.rules).message = {required: true}]; // Indicates that latency stat should be computed in microseconds. By default it is computed in // milliseconds. This does not apply to upstream command stats currently. bool latency_in_micros = 4; // List of **unique** prefixes used to separate keys from different workloads to different // clusters. Envoy will always favor the longest match first in case of overlap. A catch-all // cluster can be used to forward commands when there is no match. 
Time complexity of the // lookups are in O(min(longest key prefix, key length)). // // Example: // // .. code-block:: yaml // // prefix_routes: // routes: // - prefix: "ab" // cluster: "cluster_a" // - prefix: "abc" // cluster: "cluster_b" // // When using the above routes, the following prefixes would be sent to: // // * ``get abc:users`` would retrieve the key 'abc:users' from cluster_b. // * ``get ab:users`` would retrieve the key 'ab:users' from cluster_a. // * ``get z:users`` would return a NoUpstreamHost error. A :ref:`catch-all // route` // would have retrieved the key from that cluster instead. // // See the :ref:`configuration section // ` of the architecture overview for recommendations on // configuring the backing clusters. PrefixRoutes prefix_routes = 5; // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis // AUTH command `_ with this password before enabling any other // command. If an AUTH command's password matches this password, an "OK" response will be returned // to the client. If the AUTH command password does not match this password, then an "ERR invalid // password" error will be returned. If any other command is received before AUTH when this // password is set, then a "NOAUTH Authentication required." error response will be sent to the // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no password is set" error will be returned. config.core.v3.DataSource downstream_auth_password = 6 [(udpa.annotations.sensitive) = true]; // List of faults to inject. Faults currently come in two flavors: // - Delay, which delays a request. // - Error, which responds to a request with an error. Errors can also have delays attached. // // Example: // // .. 
code-block:: yaml // // faults: // - fault_type: ERROR // fault_enabled: // default_value: // numerator: 10 // denominator: HUNDRED // runtime_key: "bogus_key" // commands: // - GET // - fault_type: DELAY // fault_enabled: // default_value: // numerator: 10 // denominator: HUNDRED // runtime_key: "bogus_key" // delay: 2s // // See the :ref:`fault injection section // ` for more information on how to configure this. repeated RedisFault faults = 8; // If a username is provided an ACL style AUTH command will be required with a username and password. // Authenticate Redis client connections locally by forcing downstream clients to issue a `Redis // AUTH command `_ with this username and the *downstream_auth_password* // before enabling any other command. If an AUTH command's username and password matches this username // and the *downstream_auth_password* , an "OK" response will be returned to the client. If the AUTH // command username or password does not match this username or the *downstream_auth_password*, then an // "WRONGPASS invalid username-password pair" error will be returned. If any other command is received before AUTH when this // password is set, then a "NOAUTH Authentication required." error response will be sent to the // client. If an AUTH command is received when the password is not set, then an "ERR Client sent // AUTH, but no ACL is set" error will be returned. config.core.v3.DataSource downstream_auth_username = 7 [(udpa.annotations.sensitive) = true]; } // RedisProtocolOptions specifies Redis upstream protocol options. This object is used in // :ref:`typed_extension_protocol_options`, // keyed by the name `envoy.filters.network.redis_proxy`. message RedisProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.redis_proxy.v2.RedisProtocolOptions"; // Upstream server password as defined by the `requirepass` directive // `_ in the server's configuration file. 
config.core.v3.DataSource auth_password = 1 [(udpa.annotations.sensitive) = true]; // Upstream server username as defined by the `user` directive // `_ in the server's configuration file. config.core.v3.DataSource auth_username = 2 [(udpa.annotations.sensitive) = true]; } ================================================ FILE: api/envoy/extensions/filters/network/rocketmq_proxy/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/route/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/rocketmq_proxy/v3/README.md ================================================ Protocol buffer definitions for the Rocketmq proxy. ================================================ FILE: api/envoy/extensions/filters/network/rocketmq_proxy/v3/rocketmq_proxy.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.rocketmq_proxy.v3; import "envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3"; option java_outer_classname = "RocketmqProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RocketMQ Proxy] // RocketMQ Proxy :ref:`configuration overview `. 
// [#extension: envoy.filters.network.rocketmq_proxy]

message RocketmqProxy {
  // The human readable prefix to use when emitting statistics.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // The route table for the connection manager is specified in this property.
  RouteConfiguration route_config = 2;

  // The largest duration transient object expected to live, more than 10s is recommended.
  google.protobuf.Duration transient_object_life_span = 3;

  // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting
  // facility without considering backward compatibility of existing RocketMQ client SDK.
  bool develop_mode = 4;
}

================================================
FILE: api/envoy/extensions/filters/network/rocketmq_proxy/v3/route.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.rocketmq_proxy.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/config/route/v3/route_components.proto";
import "envoy/type/matcher/v3/string.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v3";
option java_outer_classname = "RouteProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Rocketmq Proxy Route Configuration]
// Rocketmq Proxy :ref:`configuration overview `.

message RouteConfiguration {
  // The name of the route configuration.
  string name = 1;

  // The list of routes that will be matched, in order, against incoming requests. The first route
  // that matches will be used.
  repeated Route routes = 2;
}

message Route {
  // Route matching parameters.
  RouteMatch match = 1 [(validate.rules).message = {required: true}];

  // Route request to some upstream cluster.
RouteAction route = 2 [(validate.rules).message = {required: true}]; } message RouteMatch { // The name of the topic. type.matcher.v3.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; // Specifies a set of headers that the route should match on. The router will check the request’s // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). repeated config.route.v3.HeaderMatcher headers = 2; } message RouteAction { // Indicates the upstream cluster to which the request should be routed. string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. config.core.v3.Metadata metadata_match = 2; } ================================================ FILE: api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v4alpha:pkg",
        "//envoy/config/route/v4alpha:pkg",
        "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg",
        "//envoy/type/matcher/v4alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/rocketmq_proxy.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.rocketmq_proxy.v4alpha;

import "envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha";
option java_outer_classname = "RocketmqProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: RocketMQ Proxy]
// RocketMQ Proxy :ref:`configuration overview `.
// [#extension: envoy.filters.network.rocketmq_proxy]

message RocketmqProxy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.rocketmq_proxy.v3.RocketmqProxy";

  // The human readable prefix to use when emitting statistics.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // The route table for the connection manager is specified in this property.
  RouteConfiguration route_config = 2;

  // The largest duration transient object expected to live, more than 10s is recommended.
google.protobuf.Duration transient_object_life_span = 3; // If develop_mode is enabled, this proxy plugin may work without dedicated traffic intercepting // facility without considering backward compatibility of exiting RocketMQ client SDK. bool develop_mode = 4; } ================================================ FILE: api/envoy/extensions/filters/network/rocketmq_proxy/v4alpha/route.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.rocketmq_proxy.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.rocketmq_proxy.v4alpha"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Rocketmq Proxy Route Configuration] // Rocketmq Proxy :ref:`configuration overview `. message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteConfiguration"; // The name of the route configuration. string name = 1; // The list of routes that will be matched, in order, against incoming requests. The first route // that matches will be used. repeated Route routes = 2; } message Route { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.rocketmq_proxy.v3.Route"; // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. 
RouteAction route = 2 [(validate.rules).message = {required: true}]; } message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteMatch"; // The name of the topic. type.matcher.v4alpha.StringMatcher topic = 1 [(validate.rules).message = {required: true}]; // Specifies a set of headers that the route should match on. The router will check the request’s // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). repeated config.route.v4alpha.HeaderMatcher headers = 2; } message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.rocketmq_proxy.v3.RouteAction"; // Indicates the upstream cluster to which the request should be routed. string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. config.core.v4alpha.Metadata metadata_match = 2; } ================================================ FILE: api/envoy/extensions/filters/network/sni_cluster/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/network/sni_cluster/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/network/sni_cluster/v3/sni_cluster.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.sni_cluster.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_cluster.v3";
option java_outer_classname = "SniClusterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: SNI Cluster Filter]
// Set the upstream cluster name from the SNI field in the TLS connection.
// [#extension: envoy.filters.network.sni_cluster]

message SniCluster {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.sni_cluster.v2.SniCluster";
}

================================================
FILE: api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha/sni_dynamic_forward_proxy.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha;

import "envoy/extensions/common/dynamic_forward_proxy/v3/dns_cache.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.sni_dynamic_forward_proxy.v3alpha";
option java_outer_classname = "SniDynamicForwardProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: SNI dynamic forward proxy]

// Configuration for the SNI-based dynamic forward proxy filter. See the
// :ref:`architecture overview ` for
// more information. Note this filter must be configured along with
// :ref:`TLS inspector listener filter `
// to work.
// [#extension: envoy.filters.network.sni_dynamic_forward_proxy]
message FilterConfig {
  // The DNS cache configuration that the filter will attach to. Note this
  // configuration must match that of associated :ref:`dynamic forward proxy
  // cluster configuration
  // `.
  common.dynamic_forward_proxy.v3.DnsCacheConfig dns_cache_config = 1
      [(validate.rules).message = {required: true}];

  oneof port_specifier {
    // The port number to connect to the upstream.
uint32 port_value = 2 [(validate.rules).uint32 = {lte: 65535 gt: 0}]; } } ================================================ FILE: api/envoy/extensions/filters/network/tcp_proxy/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/tcp_proxy/v3/tcp_proxy.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.tcp_proxy.v3; import "envoy/config/accesslog/v3/accesslog.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/type/v3/hash_policy.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v3"; option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.tcp_proxy] // [#next-free-field: 14] message TcpProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.tcp_proxy.v2.TcpProxy"; // Allows for specification of multiple upstream clusters along with weights // that indicate the percentage of traffic to be forwarded to each cluster. // The router selects an upstream cluster based on these weights. 
message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster"; message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the // clusters array determines the total weight. uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what is set in this field will be considered // for load balancing. Note that this will be merged with what's provided in // :ref:`TcpProxy.metadata_match // `, with values // here taking precedence. The filter name should be specified as *envoy.lb*. config.core.v3.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } // Configuration for tunneling TCP over other transports or application layers. // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will // remain the default. message TunnelingConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.tcp_proxy.v2.TcpProxy.TunnelingConfig"; // The hostname to send in the synthesized CONNECT headers to the upstream proxy. string hostname = 1 [(validate.rules).string = {min_len: 1}]; } reserved 6; reserved "deprecated_v1"; // The prefix to use when emitting :ref:`statistics // `. 
string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; oneof cluster_specifier { option (validate.required) = true; // The upstream cluster to connect to. string cluster = 2; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. WeightedCluster weighted_clusters = 10; } // Optional endpoint metadata match criteria. Only endpoints in the upstream // cluster with metadata matching that set in metadata_match will be // considered. The filter name should be specified as *envoy.lb*. config.core.v3.Metadata metadata_match = 9; // The idle timeout for connections managed by the TCP proxy filter. The idle timeout // is defined as the period in which there are no bytes sent or received on either // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set // to 0s, the timeout will be disabled. // // .. warning:: // Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. google.protobuf.Duration idle_timeout = 8; // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy // filter. The idle timeout is defined as the period in which there is no // active traffic. If not set, there is no idle timeout. When the idle timeout // is reached the connection will be closed. The distinction between // downstream_idle_timeout/upstream_idle_timeout provides a means to set // timeout based on the last byte sent on the downstream/upstream connection. google.protobuf.Duration downstream_idle_timeout = 3; // [#not-implemented-hide:] google.protobuf.Duration upstream_idle_timeout = 4; // Configuration for :ref:`access logs ` // emitted by this tcp_proxy. repeated config.accesslog.v3.AccessLog access_log = 5; // The maximum number of unsuccessful connection attempts that will be made before // giving up. 
If the parameter is not specified, 1 connection attempt will be made. google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based // load balancing algorithms will select a host randomly. Currently the number of hash policies is // limited to 1. repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; // [#not-implemented-hide:] feature in progress // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload // will be proxied upstream as per usual. TunnelingConfig tunneling_config = 12; // The maximum duration of a connection. The duration is defined as the period since a connection // was established. If not set, there is no max duration. When max_downstream_connection_duration // is reached the connection will be closed. Duration must be at least 1ms. google.protobuf.Duration max_downstream_connection_duration = 13 [(validate.rules).duration = {gte {nanos: 1000000}}]; } ================================================ FILE: api/envoy/extensions/filters/network/tcp_proxy/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/accesslog/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/tcp_proxy/v4alpha/tcp_proxy.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.tcp_proxy.v4alpha; import "envoy/config/accesslog/v4alpha/accesslog.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/type/v3/hash_policy.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.tcp_proxy.v4alpha"; option java_outer_classname = "TcpProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: TCP Proxy] // TCP Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.tcp_proxy] // [#next-free-field: 14] message TcpProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy"; // Allows for specification of multiple upstream clusters along with weights // that indicate the percentage of traffic to be forwarded to each cluster. // The router selects an upstream cluster based on these weights. 
message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster"; message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is // determined by its weight. The sum of weights across all entries in the // clusters array determines the total weight. uint32 weight = 2 [(validate.rules).uint32 = {gte: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints // in the upstream cluster with metadata matching what is set in this field will be considered // for load balancing. Note that this will be merged with what's provided in // :ref:`TcpProxy.metadata_match // `, with values // here taking precedence. The filter name should be specified as *envoy.lb*. config.core.v4alpha.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } // Configuration for tunneling TCP over other transports or application layers. // Currently, only HTTP/2 is supported. When other options exist, HTTP/2 will // remain the default. message TunnelingConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy.TunnelingConfig"; // The hostname to send in the synthesized CONNECT headers to the upstream proxy. string hostname = 1 [(validate.rules).string = {min_len: 1}]; } reserved 6; reserved "deprecated_v1"; // The prefix to use when emitting :ref:`statistics // `. 
string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; oneof cluster_specifier { option (validate.required) = true; // The upstream cluster to connect to. string cluster = 2; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. WeightedCluster weighted_clusters = 10; } // Optional endpoint metadata match criteria. Only endpoints in the upstream // cluster with metadata matching that set in metadata_match will be // considered. The filter name should be specified as *envoy.lb*. config.core.v4alpha.Metadata metadata_match = 9; // The idle timeout for connections managed by the TCP proxy filter. The idle timeout // is defined as the period in which there are no bytes sent or received on either // the upstream or downstream connection. If not set, the default idle timeout is 1 hour. If set // to 0s, the timeout will be disabled. // // .. warning:: // Disabling this timeout has a high likelihood of yielding connection leaks due to lost TCP // FIN packets, etc. google.protobuf.Duration idle_timeout = 8; // [#not-implemented-hide:] The idle timeout for connections managed by the TCP proxy // filter. The idle timeout is defined as the period in which there is no // active traffic. If not set, there is no idle timeout. When the idle timeout // is reached the connection will be closed. The distinction between // downstream_idle_timeout/upstream_idle_timeout provides a means to set // timeout based on the last byte sent on the downstream/upstream connection. google.protobuf.Duration downstream_idle_timeout = 3; // [#not-implemented-hide:] google.protobuf.Duration upstream_idle_timeout = 4; // Configuration for :ref:`access logs ` // emitted by this tcp_proxy. repeated config.accesslog.v4alpha.AccessLog access_log = 5; // The maximum number of unsuccessful connection attempts that will be made before // giving up. 
If the parameter is not specified, 1 connection attempt will be made. google.protobuf.UInt32Value max_connect_attempts = 7 [(validate.rules).uint32 = {gte: 1}]; // Optional configuration for TCP proxy hash policy. If hash_policy is not set, the hash-based // load balancing algorithms will select a host randomly. Currently the number of hash policies is // limited to 1. repeated type.v3.HashPolicy hash_policy = 11 [(validate.rules).repeated = {max_items: 1}]; // [#not-implemented-hide:] feature in progress // If set, this configures tunneling, e.g. configuration options to tunnel multiple TCP // payloads over a shared HTTP/2 tunnel. If this message is absent, the payload // will be proxied upstream as per usual. TunnelingConfig tunneling_config = 12; // The maximum duration of a connection. The duration is defined as the period since a connection // was established. If not set, there is no max duration. When max_downstream_connection_duration // is reached the connection will be closed. Duration must be at least 1ms. google.protobuf.Duration max_downstream_connection_duration = 13 [(validate.rules).duration = {gte {nanos: 1000000}}]; } ================================================ FILE: api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", "//envoy/config/ratelimit/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3/rate_limit.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3; import "envoy/config/ratelimit/v3/rls.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.filters.ratelimit.v3"; option java_outer_classname = "RateLimitProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate limit] // Rate limit :ref:`configuration overview `. // [#extension: envoy.filters.thrift.ratelimit] // [#next-free-field: 6] message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.thrift.rate_limit.v2alpha1.RateLimit"; // The rate limit domain to use in the rate limit service request. string domain = 1 [(validate.rules).string = {min_len: 1}]; // Specifies the rate limit configuration stage. Each configured rate limit filter performs a // rate limit check using descriptors configured in the // :ref:`envoy_api_msg_extensions.filters.network.thrift_proxy.v3.RouteAction` for the request. // Only those entries with a matching stage number are used for a given filter. If not set, the // default stage number is 0. // // .. note:: // // The filter supports a range of 0 - 10 inclusively for stage numbers. 
uint32 stage = 2 [(validate.rules).uint32 = {lte: 10}]; // The timeout in milliseconds for the rate limit service RPC. If not // set, this defaults to 20ms. google.protobuf.Duration timeout = 3; // The filter's behaviour in case the rate limiting service does // not respond back. When it is set to true, Envoy will not allow traffic in case of // communication failure between rate limiting service and the proxy. // Defaults to false. bool failure_mode_deny = 4; // Configuration for an external rate limit service provider. If not // specified, any calls to the rate limit service will immediately return // success. config.ratelimit.v3.RateLimitServiceConfig rate_limit_service = 5 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/extensions/filters/network/thrift_proxy/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", "//envoy/config/route/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/thrift_proxy/v3/README.md ================================================ Protocol buffer definitions for the Thrift proxy. 
================================================ FILE: api/envoy/extensions/filters/network/thrift_proxy/v3/route.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.thrift_proxy.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/route/v3/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Thrift Proxy Route Configuration] // Thrift Proxy :ref:`configuration overview `. message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteConfiguration"; // The name of the route configuration. Reserved for future use in asynchronous route discovery. string name = 1; // The list of routes that will be matched, in order, against incoming requests. The first route // that matches will be used. repeated Route routes = 2; } message Route { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.Route"; // Route matching parameters. RouteMatch match = 1 [(validate.rules).message = {required: true}]; // Route request to some upstream cluster. RouteAction route = 2 [(validate.rules).message = {required: true}]; } message RouteMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteMatch"; oneof match_specifier { option (validate.required) = true; // If specified, the route must exactly match the request method name. As a special case, an // empty string matches any request method name. 
string method_name = 1; // If specified, the route must have the service name as the request method name prefix. As a // special case, an empty string matches any service name. Only relevant when service // multiplexing. string service_name = 2; } // Inverts whatever matching is done in the :ref:`method_name // ` or // :ref:`service_name // ` fields. // Cannot be combined with wildcard matching as that would result in routes never being matched. // // .. note:: // // This does not invert matching done as part of the :ref:`headers field // ` field. To // invert header matching, see :ref:`invert_match // `. bool invert = 3; // Specifies a set of headers that the route should match on. The router will check the request’s // headers against all the specified headers in the route config. A match will happen if all the // headers in the route are present in the request with the same values (or based on presence if // the value field is not in the config). Note that this only applies for Thrift transports and/or // protocols that support headers. repeated config.route.v3.HeaderMatcher headers = 4; } // [#next-free-field: 7] message RouteAction { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.RouteAction"; oneof cluster_specifier { option (validate.required) = true; // Indicates a single upstream cluster to which the request should be routed // to. string cluster = 1 [(validate.rules).string = {min_len: 1}]; // Multiple upstream clusters can be specified for a given route. The // request is routed to one of the upstream clusters based on weights // assigned to each cluster. WeightedCluster weighted_clusters = 2; // Envoy will determine the cluster to route to by reading the value of the // Thrift header named by cluster_header from the request headers. 
If the // header is not found or the referenced cluster does not exist Envoy will // respond with an unknown method exception or an internal error exception, // respectively. string cluster_header = 6 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}]; } // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field will be considered. // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match // `, // with values there taking precedence. Keys and values should be provided under the "envoy.lb" // metadata key. config.core.v3.Metadata metadata_match = 3; // Specifies a set of rate limit configurations that could be applied to the route. // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders // action with the header name ":method-name". repeated config.route.v3.RateLimit rate_limits = 4; // Strip the service prefix from the method name, if there's a prefix. For // example, the method call Service:method would end up being just method. bool strip_service_name = 5; } // Allows for specification of multiple upstream clusters along with weights that indicate the // percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster // based on these weights. message WeightedCluster { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster"; message ClusterWeight { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.WeightedCluster.ClusterWeight"; // Name of the upstream cluster. string name = 1 [(validate.rules).string = {min_len: 1}]; // When a request matches the route, the choice of an upstream cluster is determined by its // weight. 
The sum of weights across all entries in the clusters array determines the total // weight. google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}]; // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in // the upstream cluster with metadata matching what is set in this field, combined with what's // provided in :ref:`RouteAction's metadata_match // `, // will be considered. Values here will take precedence. Keys and values should be provided // under the "envoy.lb" metadata key. config.core.v3.Metadata metadata_match = 3; } // Specifies one or more upstream clusters associated with the route. repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/extensions/filters/network/thrift_proxy/v3/thrift_proxy.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.thrift_proxy.v3; import "envoy/extensions/filters/network/thrift_proxy/v3/route.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v3"; option java_outer_classname = "ThriftProxyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Thrift Proxy] // Thrift Proxy :ref:`configuration overview `. // [#extension: envoy.filters.network.thrift_proxy] // Thrift transport types supported by Envoy. enum TransportType { // For downstream connections, the Thrift proxy will attempt to determine which transport to use. // For upstream connections, the Thrift proxy will use same transport as the downstream // connection. AUTO_TRANSPORT = 0; // The Thrift proxy will use the Thrift framed transport. 
FRAMED = 1; // The Thrift proxy will use the Thrift unframed transport. UNFRAMED = 2; // The Thrift proxy will assume the client is using the Thrift header transport. HEADER = 3; } // Thrift Protocol types supported by Envoy. enum ProtocolType { // For downstream connections, the Thrift proxy will attempt to determine which protocol to use. // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol // detection. For upstream connections, the Thrift proxy will use the same protocol as the // downstream connection. AUTO_PROTOCOL = 0; // The Thrift proxy will use the Thrift binary protocol. BINARY = 1; // The Thrift proxy will use Thrift non-strict binary protocol. LAX_BINARY = 2; // The Thrift proxy will use the Thrift compact protocol. COMPACT = 3; // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library. TWITTER = 4; } // [#next-free-field: 6] message ThriftProxy { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProxy"; // Supplies the type of transport that the Thrift proxy should use. Defaults to // :ref:`AUTO_TRANSPORT`. TransportType transport = 2 [(validate.rules).enum = {defined_only: true}]; // Supplies the type of protocol that the Thrift proxy should use. Defaults to // :ref:`AUTO_PROTOCOL`. ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}]; // The human readable prefix to use when emitting statistics. string stat_prefix = 1 [(validate.rules).string = {min_len: 1}]; // The route table for the connection manager is static and is specified in this property. RouteConfiguration route_config = 4; // A list of individual Thrift filters that make up the filter chain for requests made to the // Thrift proxy. Order matters as the filters are processed sequentially. 
For backwards // compatibility, if no thrift_filters are specified, a default Thrift router filter // (`envoy.filters.thrift.router`) is used. repeated ThriftFilter thrift_filters = 5; } // ThriftFilter configures a Thrift filter. message ThriftFilter { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftFilter"; reserved 2; reserved "config"; // The name of the filter to instantiate. The name must match a supported // filter. The built-in filters are: // // [#comment:TODO(zuercher): Auto generate the following list] // * :ref:`envoy.filters.thrift.router ` // * :ref:`envoy.filters.thrift.rate_limit ` string name = 1 [(validate.rules).string = {min_len: 1}]; // Filter specific configuration which depends on the filter being instantiated. See the supported // filters for further documentation. oneof config_type { google.protobuf.Any typed_config = 3; } } // ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in // :ref:`typed_extension_protocol_options`, // keyed by the name `envoy.filters.network.thrift_proxy`. message ThriftProtocolOptions { option (udpa.annotations.versioning).previous_message_type = "envoy.config.filter.network.thrift_proxy.v2alpha1.ThriftProtocolOptions"; // Supplies the type of transport that the Thrift proxy should use for upstream connections. // Selecting // :ref:`AUTO_TRANSPORT`, // which is the default, causes the proxy to use the same transport as the downstream connection. TransportType transport = 1 [(validate.rules).enum = {defined_only: true}]; // Supplies the type of protocol that the Thrift proxy should use for upstream connections. // Selecting // :ref:`AUTO_PROTOCOL`, // which is the default, causes the proxy to use the same protocol as the downstream connection. 
ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/extensions/filters/network/thrift_proxy/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/config/route/v4alpha:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto ================================================ syntax = "proto3"; package envoy.extensions.filters.network.thrift_proxy.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/route/v4alpha/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha"; option java_outer_classname = "RouteProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Thrift Proxy Route Configuration] // Thrift Proxy :ref:`configuration overview `. message RouteConfiguration { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.filters.network.thrift_proxy.v3.RouteConfiguration"; // The name of the route configuration. Reserved for future use in asynchronous route discovery. string name = 1; // The list of routes that will be matched, in order, against incoming requests. The first route // that matches will be used. 
repeated Route routes = 2;
}

message Route {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.thrift_proxy.v3.Route";

  // Route matching parameters.
  RouteMatch match = 1 [(validate.rules).message = {required: true}];

  // Route request to some upstream cluster.
  RouteAction route = 2 [(validate.rules).message = {required: true}];
}

message RouteMatch {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.thrift_proxy.v3.RouteMatch";

  oneof match_specifier {
    option (validate.required) = true;

    // If specified, the route must exactly match the request method name. As a special case, an
    // empty string matches any request method name.
    string method_name = 1;

    // If specified, the route must have the service name as the request method name prefix. As a
    // special case, an empty string matches any service name. Only relevant when service
    // multiplexing.
    string service_name = 2;
  }

  // Inverts whatever matching is done in the :ref:`method_name
  // ` or
  // :ref:`service_name
  // ` fields.
  // Cannot be combined with wildcard matching as that would result in routes never being matched.
  //
  // .. note::
  //
  //   This does not invert matching done as part of the :ref:`headers field
  //   ` field. To
  //   invert header matching, see :ref:`invert_match
  //   `.
  bool invert = 3;

  // Specifies a set of headers that the route should match on. The router will check the request’s
  // headers against all the specified headers in the route config. A match will happen if all the
  // headers in the route are present in the request with the same values (or based on presence if
  // the value field is not in the config). Note that this only applies for Thrift transports and/or
  // protocols that support headers.
  repeated config.route.v4alpha.HeaderMatcher headers = 4;
}

// [#next-free-field: 7]
message RouteAction {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.thrift_proxy.v3.RouteAction";

  oneof cluster_specifier {
    option (validate.required) = true;

    // Indicates a single upstream cluster to which the request should be routed
    // to.
    string cluster = 1 [(validate.rules).string = {min_len: 1}];

    // Multiple upstream clusters can be specified for a given route. The
    // request is routed to one of the upstream clusters based on weights
    // assigned to each cluster.
    WeightedCluster weighted_clusters = 2;

    // Envoy will determine the cluster to route to by reading the value of the
    // Thrift header named by cluster_header from the request headers. If the
    // header is not found or the referenced cluster does not exist Envoy will
    // respond with an unknown method exception or an internal error exception,
    // respectively.
    string cluster_header = 6
        [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_VALUE strict: false}];
  }

  // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in
  // the upstream cluster with metadata matching what is set in this field will be considered.
  // Note that this will be merged with what's provided in :ref:`WeightedCluster.metadata_match
  // `,
  // with values there taking precedence. Keys and values should be provided under the "envoy.lb"
  // metadata key.
  config.core.v4alpha.Metadata metadata_match = 3;

  // Specifies a set of rate limit configurations that could be applied to the route.
  // N.B. Thrift service or method name matching can be achieved by specifying a RequestHeaders
  // action with the header name ":method-name".
  repeated config.route.v4alpha.RateLimit rate_limits = 4;

  // Strip the service prefix from the method name, if there's a prefix. For
  // example, the method call Service:method would end up being just method.
  bool strip_service_name = 5;
}

// Allows for specification of multiple upstream clusters along with weights that indicate the
// percentage of traffic to be forwarded to each cluster. The router selects an upstream cluster
// based on these weights.
message WeightedCluster {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster";

  message ClusterWeight {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.filters.network.thrift_proxy.v3.WeightedCluster.ClusterWeight";

    // Name of the upstream cluster.
    string name = 1 [(validate.rules).string = {min_len: 1}];

    // When a request matches the route, the choice of an upstream cluster is determined by its
    // weight. The sum of weights across all entries in the clusters array determines the total
    // weight.
    google.protobuf.UInt32Value weight = 2 [(validate.rules).uint32 = {gte: 1}];

    // Optional endpoint metadata match criteria used by the subset load balancer. Only endpoints in
    // the upstream cluster with metadata matching what is set in this field, combined with what's
    // provided in :ref:`RouteAction's metadata_match
    // `,
    // will be considered. Values here will take precedence. Keys and values should be provided
    // under the "envoy.lb" metadata key.
    config.core.v4alpha.Metadata metadata_match = 3;
  }

  // Specifies one or more upstream clusters associated with the route.
  repeated ClusterWeight clusters = 1 [(validate.rules).repeated = {min_items: 1}];
}

================================================
FILE: api/envoy/extensions/filters/network/thrift_proxy/v4alpha/thrift_proxy.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.thrift_proxy.v4alpha;

import "envoy/extensions/filters/network/thrift_proxy/v4alpha/route.proto";

import "google/protobuf/any.proto";
import "google/protobuf/struct.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.thrift_proxy.v4alpha";
option java_outer_classname = "ThriftProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Thrift Proxy]
// Thrift Proxy :ref:`configuration overview `.
// [#extension: envoy.filters.network.thrift_proxy]

// Thrift transport types supported by Envoy.
enum TransportType {
  // For downstream connections, the Thrift proxy will attempt to determine which transport to use.
  // For upstream connections, the Thrift proxy will use same transport as the downstream
  // connection.
  AUTO_TRANSPORT = 0;

  // The Thrift proxy will use the Thrift framed transport.
  FRAMED = 1;

  // The Thrift proxy will use the Thrift unframed transport.
  UNFRAMED = 2;

  // The Thrift proxy will assume the client is using the Thrift header transport.
  HEADER = 3;
}

// Thrift Protocol types supported by Envoy.
enum ProtocolType {
  // For downstream connections, the Thrift proxy will attempt to determine which protocol to use.
  // Note that the older, non-strict (or lax) binary protocol is not included in automatic protocol
  // detection. For upstream connections, the Thrift proxy will use the same protocol as the
  // downstream connection.
AUTO_PROTOCOL = 0;

  // The Thrift proxy will use the Thrift binary protocol.
  BINARY = 1;

  // The Thrift proxy will use Thrift non-strict binary protocol.
  LAX_BINARY = 2;

  // The Thrift proxy will use the Thrift compact protocol.
  COMPACT = 3;

  // The Thrift proxy will use the Thrift "Twitter" protocol implemented by the finagle library.
  TWITTER = 4;
}

// [#next-free-field: 6]
message ThriftProxy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProxy";

  // Supplies the type of transport that the Thrift proxy should use. Defaults to
  // :ref:`AUTO_TRANSPORT`.
  TransportType transport = 2 [(validate.rules).enum = {defined_only: true}];

  // Supplies the type of protocol that the Thrift proxy should use. Defaults to
  // :ref:`AUTO_PROTOCOL`.
  ProtocolType protocol = 3 [(validate.rules).enum = {defined_only: true}];

  // The human readable prefix to use when emitting statistics.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // The route table for the connection manager is static and is specified in this property.
  RouteConfiguration route_config = 4;

  // A list of individual Thrift filters that make up the filter chain for requests made to the
  // Thrift proxy. Order matters as the filters are processed sequentially. For backwards
  // compatibility, if no thrift_filters are specified, a default Thrift router filter
  // (`envoy.filters.thrift.router`) is used.
  repeated ThriftFilter thrift_filters = 5;
}

// ThriftFilter configures a Thrift filter.
message ThriftFilter {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.thrift_proxy.v3.ThriftFilter";

  reserved 2;

  reserved "config";

  // The name of the filter to instantiate. The name must match a supported
  // filter. The built-in filters are:
  //
  // [#comment:TODO(zuercher): Auto generate the following list]
  // * :ref:`envoy.filters.thrift.router `
  // * :ref:`envoy.filters.thrift.rate_limit `
  string name = 1 [(validate.rules).string = {min_len: 1}];

  // Filter specific configuration which depends on the filter being instantiated. See the supported
  // filters for further documentation.
  oneof config_type {
    google.protobuf.Any typed_config = 3;
  }
}

// ThriftProtocolOptions specifies Thrift upstream protocol options. This object is used in
// in
// :ref:`typed_extension_protocol_options`,
// keyed by the name `envoy.filters.network.thrift_proxy`.
message ThriftProtocolOptions {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.network.thrift_proxy.v3.ThriftProtocolOptions";

  // Supplies the type of transport that the Thrift proxy should use for upstream connections.
  // Selecting
  // :ref:`AUTO_TRANSPORT`,
  // which is the default, causes the proxy to use the same transport as the downstream connection.
  TransportType transport = 1 [(validate.rules).enum = {defined_only: true}];

  // Supplies the type of protocol that the Thrift proxy should use for upstream connections.
  // Selecting
  // :ref:`AUTO_PROTOCOL`,
  // which is the default, causes the proxy to use the same protocol as the downstream connection.
  ProtocolType protocol = 2 [(validate.rules).enum = {defined_only: true}];
}

================================================
FILE: api/envoy/extensions/filters/network/wasm/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/extensions/wasm/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/network/wasm/v3/wasm.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.wasm.v3;

import "envoy/extensions/wasm/v3/wasm.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.wasm.v3";
option java_outer_classname = "WasmProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Wasm]
// [#extension: envoy.filters.network.wasm]

// Wasm :ref:`configuration overview `.
message Wasm {
  // General Plugin configuration.
  envoy.extensions.wasm.v3.PluginConfig config = 1;
}

================================================
FILE: api/envoy/extensions/filters/network/zookeeper_proxy/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/network/zookeeper_proxy/v3/zookeeper_proxy.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.network.zookeeper_proxy.v3;

import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.network.zookeeper_proxy.v3";
option java_outer_classname = "ZookeeperProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: ZooKeeper proxy]
// ZooKeeper Proxy :ref:`configuration overview `.
// [#extension: envoy.filters.network.zookeeper_proxy]

message ZooKeeperProxy {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.network.zookeeper_proxy.v1alpha1.ZooKeeperProxy";

  // The human readable prefix to use when emitting :ref:`statistics
  // `.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // [#not-implemented-hide:] The optional path to use for writing ZooKeeper access logs.
  // If the access log field is empty, access logs will not be written.
  string access_log = 2;

  // Messages — requests, responses and events — that are bigger than this value will
  // be ignored. If it is not set, the default value is 1Mb.
  //
  // The value here should match the jute.maxbuffer property in your cluster configuration:
  //
  // https://zookeeper.apache.org/doc/r3.4.10/zookeeperAdmin.html#Unsafe+Options
  //
  // if that is set. If it isn't, ZooKeeper's default is also 1Mb.
  google.protobuf.UInt32Value max_packet_bytes = 3;
}

================================================
FILE: api/envoy/extensions/filters/udp/dns_filter/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/data/dns/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/udp/dns_filter/v3alpha/dns_filter.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.udp.dns_filter.v3alpha;

import "envoy/config/core/v3/address.proto";
import "envoy/config/core/v3/base.proto";
import "envoy/data/dns/v3/dns_table.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v3alpha";
option java_outer_classname = "DnsFilterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: DNS Filter]
// DNS Filter :ref:`configuration overview `.
// [#extension: envoy.filters.udp_listener.dns_filter]

// Configuration for the DNS filter.
message DnsFilterConfig {
  // This message contains the configuration for the DNS Filter operating
  // in a server context. This message will contain the virtual hosts and
  // associated addresses with which Envoy will respond to queries
  message ServerContextConfig {
    oneof config_source {
      option (validate.required) = true;

      // Load the configuration specified from the control plane
      data.dns.v3.DnsTable inline_dns_table = 1;

      // Seed the filter configuration from an external path. This source
      // is a yaml formatted file that contains the DnsTable driving Envoy's
      // responses to DNS queries
      config.core.v3.DataSource external_dns_table = 2;
    }
  }

  // This message contains the configuration for the DNS Filter operating
  // in a client context. This message will contain the timeouts, retry,
  // and forwarding configuration for Envoy to make DNS requests to other
  // resolvers
  message ClientContextConfig {
    // Sets the maximum time we will wait for the upstream query to complete
    // We allow 5s for the upstream resolution to complete, so the minimum
    // value here is 1. Note that the total latency for a failed query is the
    // number of retries multiplied by the resolver_timeout.
    google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}];

    // A list of DNS servers to which we can forward queries. If not
    // specified, Envoy will use the ambient DNS resolvers in the
    // system.
    repeated config.core.v3.Address upstream_resolvers = 2;

    // Controls how many outstanding external lookup contexts the filter tracks.
    // The context structure allows the filter to respond to every query even if the external
    // resolution times out or is otherwise unsuccessful
    uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}];
  }

  // The stat prefix used when emitting DNS filter statistics
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // Server context configuration contains the data that the filter uses to respond
  // to DNS requests.
  ServerContextConfig server_config = 2;

  // Client context configuration controls Envoy's behavior when it must use external
  // resolvers to answer a query. This object is optional and if omitted instructs
  // the filter to resolve queries from the data in the server_config
  ClientContextConfig client_config = 3;
}

================================================
FILE: api/envoy/extensions/filters/udp/dns_filter/v4alpha/BUILD
================================================
# DO NOT EDIT.
# This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v4alpha:pkg",
        "//envoy/data/dns/v4alpha:pkg",
        "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/udp/dns_filter/v4alpha/dns_filter.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.udp.dns_filter.v4alpha;

import "envoy/config/core/v4alpha/address.proto";
import "envoy/config/core/v4alpha/base.proto";
import "envoy/data/dns/v4alpha/dns_table.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.udp.dns_filter.v4alpha";
option java_outer_classname = "DnsFilterProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: DNS Filter]
// DNS Filter :ref:`configuration overview `.
// [#extension: envoy.filters.udp_listener.dns_filter]

// Configuration for the DNS filter.
message DnsFilterConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig";

  // This message contains the configuration for the DNS Filter operating
  // in a server context. This message will contain the virtual hosts and
  // associated addresses with which Envoy will respond to queries
  message ServerContextConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ServerContextConfig";

    oneof config_source {
      option (validate.required) = true;

      // Load the configuration specified from the control plane
      data.dns.v4alpha.DnsTable inline_dns_table = 1;

      // Seed the filter configuration from an external path. This source
      // is a yaml formatted file that contains the DnsTable driving Envoy's
      // responses to DNS queries
      config.core.v4alpha.DataSource external_dns_table = 2;
    }
  }

  // This message contains the configuration for the DNS Filter operating
  // in a client context. This message will contain the timeouts, retry,
  // and forwarding configuration for Envoy to make DNS requests to other
  // resolvers
  message ClientContextConfig {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.filters.udp.dns_filter.v3alpha.DnsFilterConfig.ClientContextConfig";

    // Sets the maximum time we will wait for the upstream query to complete
    // We allow 5s for the upstream resolution to complete, so the minimum
    // value here is 1. Note that the total latency for a failed query is the
    // number of retries multiplied by the resolver_timeout.
    google.protobuf.Duration resolver_timeout = 1 [(validate.rules).duration = {gte {seconds: 1}}];

    // A list of DNS servers to which we can forward queries. If not
    // specified, Envoy will use the ambient DNS resolvers in the
    // system.
    repeated config.core.v4alpha.Address upstream_resolvers = 2;

    // Controls how many outstanding external lookup contexts the filter tracks.
    // The context structure allows the filter to respond to every query even if the external
    // resolution times out or is otherwise unsuccessful
    uint64 max_pending_lookups = 3 [(validate.rules).uint64 = {gte: 1}];
  }

  // The stat prefix used when emitting DNS filter statistics
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  // Server context configuration contains the data that the filter uses to respond
  // to DNS requests.
  ServerContextConfig server_config = 2;

  // Client context configuration controls Envoy's behavior when it must use external
  // resolvers to answer a query. This object is optional and if omitted instructs
  // the filter to resolve queries from the data in the server_config
  ClientContextConfig client_config = 3;
}

================================================
FILE: api/envoy/extensions/filters/udp/udp_proxy/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/filters/udp/udp_proxy/v3/udp_proxy.proto
================================================
syntax = "proto3";

package envoy.extensions.filters.udp.udp_proxy.v3;

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.filters.udp.udp_proxy.v3";
option java_outer_classname = "UdpProxyProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: UDP proxy]
// UDP proxy :ref:`configuration overview `.
// [#extension: envoy.filters.udp_listener.udp_proxy]

// Configuration for the UDP proxy filter.
// [#next-free-field: 6]
message UdpProxyConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.filter.udp.udp_proxy.v2alpha.UdpProxyConfig";

  // Specifies the UDP hash policy.
  // The packets can be routed by hash policy.
  message HashPolicy {
    oneof policy_specifier {
      option (validate.required) = true;

      // The source IP will be used to compute the hash used by hash-based load balancing algorithms.
      bool source_ip = 1 [(validate.rules).bool = {const: true}];
    }
  }

  // The stat prefix used when emitting UDP proxy filter stats.
  string stat_prefix = 1 [(validate.rules).string = {min_len: 1}];

  oneof route_specifier {
    option (validate.required) = true;

    // The upstream cluster to connect to.
    string cluster = 2 [(validate.rules).string = {min_len: 1}];
  }

  // The idle timeout for sessions. Idle is defined as no datagrams between received or sent by
  // the session. The default if not specified is 1 minute.
  google.protobuf.Duration idle_timeout = 3;

  // Use the remote downstream IP address as the sender IP address when sending packets to upstream hosts.
  // This option requires Envoy to be run with the *CAP_NET_ADMIN* capability on Linux.
  // And the IPv6 stack must be enabled on Linux kernel.
  // This option does not preserve the remote downstream port.
  // If this option is enabled, the IP address of sent datagrams will be changed to the remote downstream IP address.
  // This means that Envoy will not receive packets that are sent by upstream hosts because the upstream hosts
  // will send the packets with the remote downstream IP address as the destination. All packets will be routed
  // to the remote downstream directly if there are route rules on the upstream host side.
  // There are two options to return the packets back to the remote downstream.
  // The first one is to use DSR (Direct Server Return).
  // The other one is to configure routing rules on the upstream hosts to forward
  // all packets back to Envoy and configure iptables rules on the host running Envoy to
  // forward all packets from upstream hosts to the Envoy process so that Envoy can forward the packets to the downstream.
  // If the platform does not support this option, Envoy will raise a configuration error.
  bool use_original_src_ip = 4;

  // Optional configuration for UDP proxy hash policies. If hash_policies is not set, the hash-based
  // load balancing algorithms will select a host randomly. Currently the number of hash policies is
  // limited to 1.
  repeated HashPolicy hash_policies = 5 [(validate.rules).repeated = {max_items: 1}];
}

================================================
FILE: api/envoy/extensions/internal_redirect/allow_listed_routes/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/internal_redirect/allow_listed_routes/v3/allow_listed_routes_config.proto
================================================
syntax = "proto3";

package envoy.extensions.internal_redirect.allow_listed_routes.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.allow_listed_routes.v3";
option java_outer_classname = "AllowListedRoutesConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Allow listed routes internal redirect predicate]

// An internal redirect predicate that accepts only explicitly allowed target routes.
// [#extension: envoy.internal_redirect_predicates.allow_listed_routes]
message AllowListedRoutesConfig {
  // The list of routes that's allowed as redirect target by this predicate,
  // identified by the route's :ref:`name `.
  // Empty route names are not allowed.
  repeated string allowed_route_names = 1
      [(validate.rules).repeated = {items {string {min_len: 1}}}];
}

================================================
FILE: api/envoy/extensions/internal_redirect/previous_routes/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/internal_redirect/previous_routes/v3/previous_routes_config.proto
================================================
syntax = "proto3";

package envoy.extensions.internal_redirect.previous_routes.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.previous_routes.v3";
option java_outer_classname = "PreviousRoutesConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Previous routes internal redirect predicate]

// An internal redirect predicate that rejects redirect targets that are pointing
// to a route that has been followed by a previous redirect from the current route.
// [#extension: envoy.internal_redirect_predicates.previous_routes]
message PreviousRoutesConfig {
}

================================================
FILE: api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/internal_redirect/safe_cross_scheme/v3/safe_cross_scheme_config.proto
================================================
syntax = "proto3";

package envoy.extensions.internal_redirect.safe_cross_scheme.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.internal_redirect.safe_cross_scheme.v3";
option java_outer_classname = "SafeCrossSchemeConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: SafeCrossScheme internal redirect predicate]

// An internal redirect predicate that checks the scheme between the
// downstream url and the redirect target url and allows a) same scheme
// redirect and b) safe cross scheme redirect, which means if the downstream
// scheme is HTTPS, both HTTPS and HTTP redirect targets are allowed, but if the
// downstream scheme is HTTP, only HTTP redirect targets are allowed.
// [#extension:
// envoy.internal_redirect_predicates.safe_cross_scheme]
message SafeCrossSchemeConfig {
}

================================================
FILE: api/envoy/extensions/network/socket_interface/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/network/socket_interface/v3/default_socket_interface.proto
================================================
syntax = "proto3";

package envoy.extensions.network.socket_interface.v3;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.extensions.network.socket_interface.v3";
option java_outer_classname = "DefaultSocketInterfaceProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Default Socket Interface configuration]

// Configuration for default socket interface that relies on OS dependent syscall to create
// sockets.
message DefaultSocketInterface {
}

================================================
FILE: api/envoy/extensions/retry/host/omit_host_metadata/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/config/retry/omit_host_metadata/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/retry/host/omit_host_metadata/v3/omit_host_metadata_config.proto
================================================
syntax = "proto3";

package envoy.extensions.retry.host.omit_host_metadata.v3;

import "envoy/config/core/v3/base.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.extensions.retry.host.omit_host_metadata.v3";
option java_outer_classname = "OmitHostMetadataConfigProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Omit host metadata retry predicate]

// A retry host predicate that can be used to reject a host based on
// predefined metadata match criteria.
// [#extension: envoy.retry_host_predicates.omit_host_metadata]
message OmitHostMetadataConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.config.retry.omit_host_metadata.v2.OmitHostMetadataConfig";

  // Retry host predicate metadata match criteria. The hosts in
  // the upstream cluster with matching metadata will be omitted while
  // attempting a retry of a failed request. The metadata should be specified
  // under the *envoy.lb* key.
  config.core.v3.Metadata metadata_match = 1;
}

================================================
FILE: api/envoy/extensions/retry/priority/previous_priorities/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/retry/previous_priorities:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/retry/priority/previous_priorities/v3/previous_priorities_config.proto ================================================ syntax = "proto3"; package envoy.extensions.retry.priority.previous_priorities.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.retry.priority.previous_priorities.v3"; option java_outer_classname = "PreviousPrioritiesConfigProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Previous priorities retry selector] // A retry host selector that attempts to spread retries between priorities, even if certain // priorities would not normally be attempted due to higher priorities being available. // // As priorities get excluded, load will be distributed amongst the remaining healthy priorities // based on the relative health of the priorities, matching how load is distributed during regular // host selection. For example, given priority healths of {100, 50, 50}, the original load will be // {100, 0, 0} (since P0 has capacity to handle 100% of the traffic). If P0 is excluded, the load // changes to {0, 50, 50}, because P1 is only able to handle 50% of the traffic, causing the // remaining to spill over to P2. // // Each priority attempted will be excluded until there are no healthy priorities left, at which // point the list of attempted priorities will be reset, essentially starting from the beginning. 
// For example, given three priorities P0, P1, P2 with healthy % of 100, 0 and 50 respectively, the // following sequence of priorities would be selected (assuming update_frequency = 1): // Attempt 1: P0 (P0 is 100% healthy) // Attempt 2: P2 (P0 already attempted, P2 only healthy priority) // Attempt 3: P0 (no healthy priorities, reset) // Attempt 4: P2 // // In the case of all upstream hosts being unhealthy, no adjustments will be made to the original // priority load, so behavior should be identical to not using this plugin. // // Using this PriorityFilter requires rebuilding the priority load, which runs in O(# of // priorities), which might incur significant overhead for clusters with many priorities. // [#extension: envoy.retry_priorities.previous_priorities] message PreviousPrioritiesConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.retry.previous_priorities.PreviousPrioritiesConfig"; // How often the priority load should be updated based on previously attempted priorities. Useful // to allow each priority to receive more than one request before being excluded or to reduce // the number of times that the priority load has to be recomputed. // // For example, by setting this to 2, then the first two attempts (initial attempt and first // retry) will use the unmodified priority load. The third and fourth attempt will use priority // load which excludes the priorities routed to with the first two attempts, and the fifth and // sixth attempt will use the priority load excluding the priorities used for the first four // attempts. // // Must be greater than 0. int32 update_frequency = 1 [(validate.rules).int32 = {gt: 0}]; } ================================================ FILE: api/envoy/extensions/stat_sinks/wasm/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/extensions/wasm/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/stat_sinks/wasm/v3/wasm.proto ================================================ syntax = "proto3"; package envoy.extensions.stat_sinks.wasm.v3; import "envoy/extensions/wasm/v3/wasm.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.stat_sinks.wasm.v3"; option java_outer_classname = "WasmProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Wasm] // Wasm :ref:`configuration overview `. // [#extension: envoy.stat_sinks.wasm] message Wasm { // General Plugin configuration. envoy.extensions.wasm.v3.PluginConfig config = 1; } ================================================ FILE: api/envoy/extensions/tracers/datadog/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/trace/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/tracers/datadog/v4alpha/datadog.proto ================================================ syntax = "proto3"; package envoy.extensions.tracers.datadog.v4alpha; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.tracers.datadog.v4alpha"; option java_outer_classname = "DatadogProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Datadog tracer] // Configuration for the Datadog tracer. // [#extension: envoy.tracers.datadog] message DatadogConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.DatadogConfig"; // The cluster to use for submitting traces to the Datadog agent. string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The name used for the service when traces are generated by envoy. string service_name = 2 [(validate.rules).string = {min_len: 1}]; } ================================================ FILE: api/envoy/extensions/tracers/dynamic_ot/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/trace/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/tracers/dynamic_ot/v4alpha/dynamic_ot.proto ================================================ syntax = "proto3"; package envoy.extensions.tracers.dynamic_ot.v4alpha; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.tracers.dynamic_ot.v4alpha"; option java_outer_classname = "DynamicOtProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Dynamically loadable OpenTracing tracer] // DynamicOtConfig is used to dynamically load a tracer from a shared library // that implements the `OpenTracing dynamic loading API // `_. // [#extension: envoy.tracers.dynamic_ot] message DynamicOtConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.DynamicOtConfig"; // Dynamic library implementing the `OpenTracing API // `_. string library = 1 [(validate.rules).string = {min_len: 1}]; // The configuration to use when creating a tracer from the given dynamic // library. google.protobuf.Struct config = 2; } ================================================ FILE: api/envoy/extensions/tracers/lightstep/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/trace/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/tracers/lightstep/v4alpha/lightstep.proto ================================================ syntax = "proto3"; package envoy.extensions.tracers.lightstep.v4alpha; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.tracers.lightstep.v4alpha"; option java_outer_classname = "LightstepProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: LightStep tracer] // Configuration for the LightStep tracer. // [#extension: envoy.tracers.lightstep] message LightstepConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.LightstepConfig"; // Available propagation modes enum PropagationMode { // Propagate trace context in the single header x-ot-span-context. ENVOY = 0; // Propagate trace context using LightStep's native format. LIGHTSTEP = 1; // Propagate trace context using the b3 format. B3 = 2; // Propagate trace context using the w3c trace-context standard. TRACE_CONTEXT = 3; } // The cluster manager cluster that hosts the LightStep collectors. string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // File containing the access token to the `LightStep // `_ API. string access_token_file = 2 [(validate.rules).string = {min_len: 1}]; // Propagation modes to use by LightStep's tracer. 
repeated PropagationMode propagation_modes = 3 [(validate.rules).repeated = {items {enum {defined_only: true}}}]; } ================================================ FILE: api/envoy/extensions/tracers/opencensus/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/config/trace/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_config_proto", ], ) ================================================ FILE: api/envoy/extensions/tracers/opencensus/v4alpha/opencensus.proto ================================================ syntax = "proto3"; package envoy.extensions.tracers.opencensus.v4alpha; import "envoy/config/core/v4alpha/grpc_service.proto"; import "opencensus/proto/trace/v1/trace_config.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.tracers.opencensus.v4alpha"; option java_outer_classname = "OpencensusProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: OpenCensus tracer] // Configuration for the OpenCensus tracer. // [#next-free-field: 15] // [#extension: envoy.tracers.opencensus] message OpenCensusConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.OpenCensusConfig"; enum TraceContext { // No-op default, no trace context is utilized. NONE = 0; // W3C Trace-Context format "traceparent:" header. TRACE_CONTEXT = 1; // Binary "grpc-trace-bin:" header. GRPC_TRACE_BIN = 2; // "X-Cloud-Trace-Context:" header. CLOUD_TRACE_CONTEXT = 3; // X-B3-* headers. 
B3 = 4; } reserved 7; // Configures tracing, e.g. the sampler, max number of annotations, etc. .opencensus.proto.trace.v1.TraceConfig trace_config = 1; // Enables the stdout exporter if set to true. This is intended for debugging // purposes. bool stdout_exporter_enabled = 2; // Enables the Stackdriver exporter if set to true. The project_id must also // be set. bool stackdriver_exporter_enabled = 3; // The Cloud project_id to use for Stackdriver tracing. string stackdriver_project_id = 4; // (optional) By default, the Stackdriver exporter will connect to production // Stackdriver. If stackdriver_address is non-empty, it will instead connect // to this address, which is in the gRPC format: // https://github.com/grpc/grpc/blob/master/doc/naming.md string stackdriver_address = 10; // (optional) The gRPC server that hosts Stackdriver tracing service. Only // Google gRPC is supported. If :ref:`target_uri ` // is not provided, the default production Stackdriver address will be used. config.core.v4alpha.GrpcService stackdriver_grpc_service = 13; // Enables the Zipkin exporter if set to true. The url and service name must // also be set. bool zipkin_exporter_enabled = 5; // The URL to Zipkin, e.g. "http://127.0.0.1:9411/api/v2/spans" string zipkin_url = 6; // Enables the OpenCensus Agent exporter if set to true. The ocagent_address or // ocagent_grpc_service must also be set. bool ocagent_exporter_enabled = 11; // The address of the OpenCensus Agent, if its exporter is enabled, in gRPC // format: https://github.com/grpc/grpc/blob/master/doc/naming.md // [#comment:TODO: deprecate this field] string ocagent_address = 12; // (optional) The gRPC server hosted by the OpenCensus Agent. Only Google gRPC is supported. // This is only used if the ocagent_address is left empty. config.core.v4alpha.GrpcService ocagent_grpc_service = 14; // List of incoming trace context headers we will accept. First one found // wins. 
repeated TraceContext incoming_trace_context = 8; // List of outgoing trace context headers we will produce. repeated TraceContext outgoing_trace_context = 9; } ================================================ FILE: api/envoy/extensions/tracers/xray/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/config/trace/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/tracers/xray/v4alpha/xray.proto ================================================ syntax = "proto3"; package envoy.extensions.tracers.xray.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.tracers.xray.v4alpha"; option java_outer_classname = "XrayProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: AWS X-Ray Tracer Configuration] // Configuration for AWS X-Ray tracer message XRayConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.XRayConfig"; message SegmentFields { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.XRayConfig.SegmentFields"; // The type of AWS resource, e.g. "AWS::AppMesh::Proxy". string origin = 1; // AWS resource metadata dictionary. // See: `X-Ray Segment Document documentation `__ google.protobuf.Struct aws = 2; } // The UDP endpoint of the X-Ray Daemon where the spans will be sent. 
// If this value is not set, the default value of 127.0.0.1:2000 will be used. config.core.v4alpha.SocketAddress daemon_endpoint = 1; // The name of the X-Ray segment. string segment_name = 2 [(validate.rules).string = {min_len: 1}]; // The location of a local custom sampling rules JSON file. // For an example of the sampling rules see: // `X-Ray SDK documentation // `_ config.core.v4alpha.DataSource sampling_rule_manifest = 3; // Optional custom fields to be added to each trace segment. // see: `X-Ray Segment Document documentation // `__ SegmentFields segment_fields = 4; } ================================================ FILE: api/envoy/extensions/tracers/zipkin/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/config/trace/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/tracers/zipkin/v4alpha/zipkin.proto ================================================ syntax = "proto3"; package envoy.extensions.tracers.zipkin.v4alpha; import "google/protobuf/wrappers.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.tracers.zipkin.v4alpha"; option java_outer_classname = "ZipkinProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Zipkin tracer] // Configuration for the Zipkin tracer. 
// [#extension: envoy.tracers.zipkin] // [#next-free-field: 6] message ZipkinConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.config.trace.v3.ZipkinConfig"; // Available Zipkin collector endpoint versions. enum CollectorEndpointVersion { // Zipkin API v1, JSON over HTTP. // [#comment: The default implementation of Zipkin client before this field is added was only v1 // and the way user configure this was by not explicitly specifying the version. Consequently, // before this is added, the corresponding Zipkin collector expected to receive v1 payload. // Hence the motivation of adding HTTP_JSON_V1 as the default is to avoid a breaking change when // user upgrading Envoy with this change. Furthermore, we also immediately deprecate this field, // since in Zipkin realm this v1 version is considered to be not preferable anymore.] DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE = 0 [deprecated = true, (envoy.annotations.disallowed_by_default_enum) = true]; // Zipkin API v2, JSON over HTTP. HTTP_JSON = 1; // Zipkin API v2, protobuf over HTTP. HTTP_PROTO = 2; // [#not-implemented-hide:] GRPC = 3; } // The cluster manager cluster that hosts the Zipkin collectors. Note that the // Zipkin cluster must be defined in the :ref:`Bootstrap static cluster // resources `. string collector_cluster = 1 [(validate.rules).string = {min_len: 1}]; // The API endpoint of the Zipkin service where the spans will be sent. When // using a standard Zipkin installation, the API endpoint is typically // /api/v1/spans, which is the default value. string collector_endpoint = 2 [(validate.rules).string = {min_len: 1}]; // Determines whether a 128bit trace id will be used when creating a new // trace instance. The default value is false, which will result in a 64 bit trace id being used. bool trace_id_128bit = 3; // Determines whether client and server spans will share the same span context. // The default value is true. 
google.protobuf.BoolValue shared_span_context = 4; // Determines the selected collector endpoint version. By default, the enum value 0 // (``DEPRECATED_AND_UNAVAILABLE_DO_NOT_USE``, formerly ``HTTP_JSON_V1``) will be // used. CollectorEndpointVersion collector_endpoint_version = 5; } ================================================ FILE: api/envoy/extensions/transport_sockets/alts/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/transport_socket/alts/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/alts/v3/alts.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.alts.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.alts.v3"; option java_outer_classname = "AltsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: ALTS] // [#extension: envoy.transport_sockets.alts] // Configuration for ALTS transport socket. This provides Google's ALTS protocol to Envoy. // https://cloud.google.com/security/encryption-in-transit/application-layer-transport-security/ message Alts { option (udpa.annotations.versioning).previous_message_type = "envoy.config.transport_socket.alts.v2alpha.Alts"; // The location of a handshaker service, this is usually 169.254.169.254:8080 // on GCE. string handshaker_service = 1 [(validate.rules).string = {min_len: 1}]; // The acceptable service accounts from peer, peers not in the list will be rejected in the // handshake validation step. If empty, no validation will be performed. 
repeated string peer_service_accounts = 2; } ================================================ FILE: api/envoy/extensions/transport_sockets/proxy_protocol/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/proxy_protocol/v3/upstream_proxy_protocol.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.proxy_protocol.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/proxy_protocol.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.proxy_protocol.v3"; option java_outer_classname = "UpstreamProxyProtocolProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Upstream Proxy Protocol] // [#extension: envoy.transport_sockets.upstream_proxy_protocol] // Configuration for PROXY protocol socket message ProxyProtocolUpstreamTransport { // The PROXY protocol settings config.core.v3.ProxyProtocolConfig config = 1; // The underlying transport socket being wrapped. config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/extensions/transport_sockets/quic/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/extensions/transport_sockets/tls/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/quic/v3/quic_transport.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.quic.v3; import "envoy/extensions/transport_sockets/tls/v3/tls.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v3"; option java_outer_classname = "QuicTransportProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: quic transport] // [#extension: envoy.transport_sockets.quic] // Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. message QuicDownstreamTransport { tls.v3.DownstreamTlsContext downstream_tls_context = 1 [(validate.rules).message = {required: true}]; } // Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. message QuicUpstreamTransport { tls.v3.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/extensions/transport_sockets/quic/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/tls/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/quic/v4alpha/quic_transport.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.quic.v4alpha; import "envoy/extensions/transport_sockets/tls/v4alpha/tls.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.quic.v4alpha"; option java_outer_classname = "QuicTransportProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: quic transport] // [#extension: envoy.transport_sockets.quic] // Configuration for Downstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. message QuicDownstreamTransport { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.quic.v3.QuicDownstreamTransport"; tls.v4alpha.DownstreamTlsContext downstream_tls_context = 1 [(validate.rules).message = {required: true}]; } // Configuration for Upstream QUIC transport socket. This provides Google's implementation of Google QUIC and IETF QUIC to Envoy. 
message QuicUpstreamTransport { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.quic.v3.QuicUpstreamTransport"; tls.v4alpha.UpstreamTlsContext upstream_tls_context = 1 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/extensions/transport_sockets/raw_buffer/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/transport_socket/raw_buffer/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/raw_buffer/v3/raw_buffer.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.raw_buffer.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.raw_buffer.v3"; option java_outer_classname = "RawBufferProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Raw Buffer] // [#extension: envoy.transport_sockets.raw_buffer] // Configuration for raw buffer transport socket. message RawBuffer { option (udpa.annotations.versioning).previous_message_type = "envoy.config.transport_socket.raw_buffer.v2.RawBuffer"; } ================================================ FILE: api/envoy/extensions/transport_sockets/tap/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v3:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", "//envoy/extensions/common/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/tap/v3/tap.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.tap.v3; import "envoy/config/core/v3/base.proto"; import "envoy/extensions/common/tap/v3/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap] // [#extension: envoy.transport_sockets.tap] // Configuration for tap transport socket. This wraps another transport socket, providing the // ability to interpose and record in plain text any traffic that is surfaced to Envoy. message Tap { option (udpa.annotations.versioning).previous_message_type = "envoy.config.transport_socket.tap.v2alpha.Tap"; // Common configuration for the tap transport socket. common.tap.v3.CommonExtensionConfig common_config = 1 [(validate.rules).message = {required: true}]; // The underlying transport socket being wrapped. config.core.v3.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/extensions/transport_sockets/tap/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/common/tap/v4alpha:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/tap/v4alpha/tap.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.tap.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/extensions/common/tap/v4alpha/common.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tap.v4alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Tap] // [#extension: envoy.transport_sockets.tap] // Configuration for tap transport socket. This wraps another transport socket, providing the // ability to interpose and record in plain text any traffic that is surfaced to Envoy. message Tap { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tap.v3.Tap"; // Common configuration for the tap transport socket. common.tap.v4alpha.CommonExtensionConfig common_config = 1 [(validate.rules).message = {required: true}]; // The underlying transport socket being wrapped. config.core.v4alpha.TransportSocket transport_socket = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/extensions/transport_sockets/tls/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/api/v2/auth:pkg", "//envoy/config/core/v3:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/tls/v3/cert.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; import "udpa/annotations/status.proto"; import public "envoy/extensions/transport_sockets/tls/v3/common.proto"; import public "envoy/extensions/transport_sockets/tls/v3/secret.proto"; import public "envoy/extensions/transport_sockets/tls/v3/tls.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "CertProto"; option java_multiple_files = true; ================================================ FILE: api/envoy/extensions/transport_sockets/tls/v3/common.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; import "envoy/config/core/v3/base.proto"; import "envoy/type/matcher/v3/string.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common TLS configuration] message TlsParameters { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsParameters"; enum TlsProtocol { // Envoy will choose the 
optimal TLS version. TLS_AUTO = 0; // TLS 1.0 TLSv1_0 = 1; // TLS 1.1 TLSv1_1 = 2; // TLS 1.2 TLSv1_2 = 3; // TLS 1.3 TLSv1_3 = 4; } // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for // servers. TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list // `_ // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not // specified, the default list will be used. // // In non-FIPS builds, the default cipher list is: // // .. code-block:: none // // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] // ECDHE-ECDSA-AES128-SHA // ECDHE-RSA-AES128-SHA // AES128-GCM-SHA256 // AES128-SHA // ECDHE-ECDSA-AES256-GCM-SHA384 // ECDHE-RSA-AES256-GCM-SHA384 // ECDHE-ECDSA-AES256-SHA // ECDHE-RSA-AES256-SHA // AES256-GCM-SHA384 // AES256-SHA // // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: // // .. code-block:: none // // ECDHE-ECDSA-AES128-GCM-SHA256 // ECDHE-RSA-AES128-GCM-SHA256 // ECDHE-ECDSA-AES128-SHA // ECDHE-RSA-AES128-SHA // AES128-GCM-SHA256 // AES128-SHA // ECDHE-ECDSA-AES256-GCM-SHA384 // ECDHE-RSA-AES256-GCM-SHA384 // ECDHE-ECDSA-AES256-SHA // ECDHE-RSA-AES256-SHA // AES256-GCM-SHA384 // AES256-SHA repeated string cipher_suites = 3; // If specified, the TLS connection will only support the specified ECDH // curves. If not specified, the default curves will be used. // // In non-FIPS builds, the default curves are: // // .. code-block:: none // // X25519 // P-256 // // In builds using :ref:`BoringSSL FIPS `, the default curve is: // // .. 
code-block:: none // // P-256 repeated string ecdh_curves = 4; } // BoringSSL private key method configuration. The private key methods are used for external // (potentially asynchronous) signing and decryption operations. Some use cases for private key // methods would be TPM support and TLS acceleration. message PrivateKeyProvider { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.PrivateKeyProvider"; reserved 2; reserved "config"; // Private key method provider name. The name must match a // supported private key method provider type. string provider_name = 1 [(validate.rules).string = {min_len: 1}]; // Private key method provider specific configuration. oneof config_type { google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true]; } } // [#next-free-field: 7] message TlsCertificate { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsCertificate"; // The TLS certificate chain. config.core.v3.DataSource certificate_chain = 1; // The TLS private key. config.core.v3.DataSource private_key = 2 [(udpa.annotations.sensitive) = true]; // BoringSSL private key method provider. This is an alternative to :ref:`private_key // ` field. This can't be // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key // ` and // :ref:`private_key_provider // ` fields will result in an // error. PrivateKeyProvider private_key_provider = 6; // The password to decrypt the TLS private key. If this field is not set, it is assumed that the // TLS private key is not password encrypted. config.core.v3.DataSource password = 3 [(udpa.annotations.sensitive) = true]; // The OCSP response to be stapled with this certificate during the handshake. // The response must be DER-encoded and may only be provided via ``filename`` or // ``inline_bytes``. The response may pertain to only one certificate. 
config.core.v3.DataSource ocsp_staple = 4; // [#not-implemented-hide:] repeated config.core.v3.DataSource signed_certificate_timestamp = 5; } message TlsSessionTicketKeys { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.TlsSessionTicketKeys"; // Keys for encrypting and decrypting TLS session tickets. The // first key in the array contains the key to encrypt all new sessions created by this context. // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys // by, for example, putting the new key first, and the previous key second. // // If :ref:`session_ticket_keys ` // is not specified, the TLS library will still support resuming sessions via tickets, but it will // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts // or on different hosts. // // Each key must contain exactly 80 bytes of cryptographically-secure random data. For // example, the output of ``openssl rand 80``. // // .. attention:: // // Using this feature has serious security considerations and risks. Improper handling of keys // may result in loss of secrecy in connections, even if ciphers supporting perfect forward // secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some // discussion. To minimize the risk, you must: // // * Keep the session ticket keys at least as secure as your TLS certificate private keys // * Rotate session ticket keys at least daily, and preferably hourly // * Always generate keys using a cryptographically-secure random data source repeated config.core.v3.DataSource keys = 1 [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true]; } // [#next-free-field: 11] message CertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CertificateValidationContext"; // Peer certificate verification mode. 
enum TrustChainVerification { // Perform default certificate verification (e.g., against CA / verification lists) VERIFY_TRUST_CHAIN = 0; // Connections where the certificate fails verification will be permitted. // For HTTP connections, the result of certificate verification can be used in route matching. ( // see :ref:`validated ` ). ACCEPT_UNTRUSTED = 1; } reserved 4, 5; reserved "verify_subject_alt_name"; // TLS certificate data containing certificate authority certificates to use in verifying // a presented peer certificate (e.g. server certificate for clusters or client certificate // for listeners). If not specified and a peer certificate is presented it will not be // verified. By default, a client certificate is optional, unless one of the additional // options (:ref:`require_client_certificate // `, // :ref:`verify_certificate_spki // `, // :ref:`verify_certificate_hash // `, or // :ref:`match_subject_alt_names // `) is also // specified. // // It can optionally contain certificate revocation lists, in which case Envoy will verify // that the presented peer certificate has not been revoked by one of the included CRLs. Note // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be // provided for all certificate authorities in that chain. Failure to do so will result in // verification failure for both revoked and unrevoked certificates from that chain. // // See :ref:`the TLS overview ` for a list of common // system CA locations. config.core.v3.DataSource trusted_ca = 1; // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate // matches one of the specified values. // // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate // can be generated with the following command: // // .. 
code-block:: bash // // $ openssl x509 -in path/to/client.crt -noout -pubkey // | openssl pkey -pubin -outform DER // | openssl dgst -sha256 -binary // | openssl enc -base64 // NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A= // // This is the format used in HTTP Public Key Pinning. // // When both: // :ref:`verify_certificate_hash // ` and // :ref:`verify_certificate_spki // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. // // .. attention:: // // This option is preferred over :ref:`verify_certificate_hash // `, // because SPKI is tied to a private key, so it doesn't change when the certificate // is renewed using the same private key. repeated string verify_certificate_spki = 3 [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}]; // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that // the SHA-256 of the DER-encoded presented certificate matches one of the specified values. // // A hex-encoded SHA-256 of the certificate can be generated with the following command: // // .. code-block:: bash // // $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2 // df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a // // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate // can be generated with the following command: // // .. code-block:: bash // // $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2 // DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A // // Both of those formats are acceptable. // // When both: // :ref:`verify_certificate_hash // ` and // :ref:`verify_certificate_spki // ` are specified, // a hash matching value from either of the lists will result in the certificate being accepted. 
repeated string verify_certificate_hash = 2 [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}]; // An optional list of Subject Alternative name matchers. Envoy will verify that the // Subject Alternative Name of the presented certificate matches one of the specified matches. // // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be // configured with exact match type in the :ref:`string matcher `. // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com", // it should be configured as shown below. // // .. code-block:: yaml // // match_subject_alt_names: // exact: "api.example.com" // // .. attention:: // // Subject Alternative Names are easily spoofable and verifying only them is insecure, // therefore this option must be used together with :ref:`trusted_ca // `. repeated type.matcher.v3.StringMatcher match_subject_alt_names = 9; // [#not-implemented-hide:] Must present signed certificate time-stamp. google.protobuf.BoolValue require_signed_certificate_timestamp = 6; // An optional `certificate revocation list // `_ // (in PEM format). If specified, Envoy will verify that the presented peer // certificate has not been revoked by this CRL. If this DataSource contains // multiple CRLs, all of them will be used. Note that if a CRL is provided // for any certificate authority in a trust chain, a CRL must be provided // for all certificate authorities in that chain. Failure to do so will // result in verification failure for both revoked and unrevoked certificates // from that chain. config.core.v3.DataSource crl = 7; // If specified, Envoy will not reject expired certificates. bool allow_expired_certificate = 8; // Certificate trust chain verification mode. 
TrustChainVerification trust_chain_verification = 10 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/extensions/transport_sockets/tls/v3/secret.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; import "envoy/config/core/v3/base.proto"; import "envoy/config/core/v3/config_source.proto"; import "envoy/extensions/transport_sockets/tls/v3/common.proto"; import "udpa/core/v1/resource_locator.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "SecretProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Secrets configuration] message GenericSecret { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.GenericSecret"; // Secret of generic type and is available to filters. config.core.v3.DataSource secret = 1 [(udpa.annotations.sensitive) = true]; } message SdsSecretConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.SdsSecretConfig"; // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. // When both name and config are specified, then secret can be fetched and/or reloaded via // SDS. When only name is specified, then secret will be loaded from static resources. string name = 1 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // Resource locator for SDS. This is mutually exclusive to *name*. 
// [#not-implemented-hide:] udpa.core.v1.ResourceLocator sds_resource_locator = 3 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; config.core.v3.ConfigSource sds_config = 2; } // [#next-free-field: 6] message Secret { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.Secret"; // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to. string name = 1; oneof type { TlsCertificate tls_certificate = 2; TlsSessionTicketKeys session_ticket_keys = 3; CertificateValidationContext validation_context = 4; GenericSecret generic_secret = 5; } } ================================================ FILE: api/envoy/extensions/transport_sockets/tls/v3/tls.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v3; import "envoy/config/core/v3/extension.proto"; import "envoy/extensions/transport_sockets/tls/v3/common.proto"; import "envoy/extensions/transport_sockets/tls/v3/secret.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v3"; option java_outer_classname = "TlsProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: TLS transport socket] // [#extension: envoy.transport_sockets.tls] // The TLS contexts below provide the transport socket configuration for upstream/downstream TLS. message UpstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.UpstreamTlsContext"; // Common TLS context settings. // // .. attention:: // // Server certificate verification is not enabled by default. 
Configure // :ref:`trusted_ca` to enable // verification. CommonTlsContext common_tls_context = 1; // SNI string to use when creating TLS backend connections. string sni = 2 [(validate.rules).string = {max_bytes: 255}]; // If true, server-initiated TLS renegotiation will be allowed. // // .. attention:: // // TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary. bool allow_renegotiation = 3; // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets // for TLSv1.2 and older) to store for the purpose of session resumption. // // Defaults to 1, setting this to 0 disables session resumption. google.protobuf.UInt32Value max_session_keys = 4; } // [#next-free-field: 9] message DownstreamTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.DownstreamTlsContext"; enum OcspStaplePolicy { // OCSP responses are optional. If an OCSP response is absent // or expired, the associated certificate will be used for // connections without an OCSP staple. LENIENT_STAPLING = 0; // OCSP responses are optional. If an OCSP response is absent, // the associated certificate will be used without an // OCSP staple. If a response is provided but is expired, // the associated certificate will not be used for // subsequent connections. If no suitable certificate is found, // the connection is rejected. STRICT_STAPLING = 1; // OCSP responses are required. Configuration will fail if // a certificate is provided without an OCSP response. If a // response expires, the associated certificate will not be // used connections. If no suitable certificate is found, the // connection is rejected. MUST_STAPLE = 2; } // Common TLS context settings. CommonTlsContext common_tls_context = 1; // If specified, Envoy will reject connections without a valid client // certificate. 
google.protobuf.BoolValue require_client_certificate = 2; // If specified, Envoy will reject connections without a valid and matching SNI. // [#not-implemented-hide:] google.protobuf.BoolValue require_sni = 3; oneof session_ticket_keys_type { // TLS session ticket key settings. TlsSessionTicketKeys session_ticket_keys = 4; // Config for fetching TLS session ticket keys via SDS API. SdsSecretConfig session_ticket_keys_sds_secret_config = 5; // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS // server to not issue TLS session tickets for the purposes of stateless TLS session resumption. // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using // the keys specified through either :ref:`session_ticket_keys ` // or :ref:`session_ticket_keys_sds_secret_config `. // If this config is set to false and no keys are explicitly configured, the TLS server will issue // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the // implication that sessions cannot be resumed across hot restarts or on different hosts. bool disable_stateless_session_resumption = 7; } // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2) // ` // only seconds could be specified (fractional seconds are going to be ignored). google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = { lt {seconds: 4294967296} gte {} }]; // Config for whether to use certificates if they do not have // an accompanying OCSP response or if the response expires at runtime. // Defaults to LENIENT_STAPLING OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}]; } // TLS context shared by both client and server TLS contexts. 
// [#next-free-field: 14] message CommonTlsContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext"; // Config for Certificate provider to get certificates. This provider should allow certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. message CertificateProvider { // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify // a root-certificate (validation context) or "TLS" to specify a new tls-certificate. string name = 1 [(validate.rules).string = {min_len: 1}]; // Provider specific config. // Note: an implementation is expected to dedup multiple instances of the same config // to maintain a single certificate-provider instance. The sharing can happen, for // example, among multiple clusters or between the tls_certificate and validation_context // certificate providers of a cluster. // This config could be supplied inline or (in future) a named xDS resource. oneof config { option (validate.required) = true; config.core.v3.TypedExtensionConfig typed_config = 2; } } // Similar to CertificateProvider above, but allows the provider instances to be configured on // the client side instead of being sent from the control plane. message CertificateProviderInstance { // Provider instance name. This name must be defined in the client's configuration (e.g., a // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config // field that would be sent in the CertificateProvider message if the config was sent by the // control plane). If not present, defaults to "default". // // Instance names should generally be defined not in terms of the underlying provider // implementation (e.g., "file_watcher") but rather in terms of the function of the // certificates (e.g., "foo_deployment_identity"). string instance_name = 1; // Opaque name used to specify certificate instances or types. 
For example, "ROOTCA" to specify // a root-certificate (validation context) or "example.com" to specify a certificate for a // particular domain. Not all provider instances will actually use this field, so the value // defaults to the empty string. string certificate_name = 2; } message CombinedCertificateValidationContext { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.auth.CommonTlsContext.CombinedCertificateValidationContext"; // How to validate peer certificates. CertificateValidationContext default_validation_context = 1 [(validate.rules).message = {required: true}]; // Config for fetching validation context via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, // or validation_context_certificate_provider_instance may be used. SdsSecretConfig validation_context_sds_secret_config = 2 [ (validate.rules).message = {required: true}, (udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context" ]; // Certificate provider for fetching validation context. // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, // or validation_context_certificate_provider_instance may be used. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 3 [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; // Certificate provider instance for fetching validation context. // Only one of validation_context_sds_secret_config, validation_context_certificate_provider, // or validation_context_certificate_provider_instance may be used. 
// [#not-implemented-hide:] CertificateProviderInstance validation_context_certificate_provider_instance = 4 [(udpa.annotations.field_migrate).oneof_promotion = "dynamic_validation_context"]; } reserved 5; // TLS protocol versions, cipher suites etc. TlsParameters tls_params = 1; // :ref:`Multiple TLS certificates ` can be associated with the // same context to allow both RSA and ECDSA certificates. // // Only a single TLS certificate is supported in client contexts. In server contexts, the first // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is // used for clients that support ECDSA. repeated TlsCertificate tls_certificates = 2; // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6 [(validate.rules).repeated = {max_items: 1}]; // Certificate provider for fetching TLS certificates. // [#not-implemented-hide:] CertificateProvider tls_certificate_certificate_provider = 9; // Certificate provider instance for fetching TLS certificates. // [#not-implemented-hide:] CertificateProviderInstance tls_certificate_certificate_provider_instance = 11; oneof validation_context_type { // How to validate peer certificates. CertificateValidationContext validation_context = 3; // Config for fetching validation context via SDS API. Note SDS API allows certificates to be // fetched/refreshed over the network asynchronously with respect to the TLS handshake. SdsSecretConfig validation_context_sds_secret_config = 7; // Combined certificate validation context holds a default CertificateValidationContext // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic // and default CertificateValidationContext are merged into a new CertificateValidationContext // for validation. 
This merge is done by Message::MergeFrom(), so dynamic // CertificateValidationContext overwrites singular fields in default // CertificateValidationContext, and concatenates repeated fields to default // CertificateValidationContext, and logical OR is applied to boolean fields. CombinedCertificateValidationContext combined_validation_context = 8; // Certificate provider for fetching validation context. // [#not-implemented-hide:] CertificateProvider validation_context_certificate_provider = 10; // Certificate provider instance for fetching validation context. // [#not-implemented-hide:] CertificateProviderInstance validation_context_certificate_provider_instance = 12; } // Supplies the list of ALPN protocols that the listener should expose. In // practice this is likely to be set to one of two values (see the // :ref:`codec_type // ` // parameter in the HTTP connection manager for more information): // // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1. // * "http/1.1" If the listener is only going to support HTTP/1.1. // // There is no default for this parameter. If empty, Envoy will not expose ALPN. repeated string alpn_protocols = 4; // Custom TLS handshaker. If empty, defaults to native TLS handshaking // behavior. config.core.v3.TypedExtensionConfig custom_handshaker = 13; } ================================================ FILE: api/envoy/extensions/transport_sockets/tls/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/extensions/transport_sockets/tls/v4alpha/common.proto ================================================ syntax = "proto3"; package envoy.extensions.transport_sockets.tls.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "google/protobuf/any.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/sensitive.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Common TLS configuration] message TlsParameters { option (udpa.annotations.versioning).previous_message_type = "envoy.extensions.transport_sockets.tls.v3.TlsParameters"; enum TlsProtocol { // Envoy will choose the optimal TLS version. TLS_AUTO = 0; // TLS 1.0 TLSv1_0 = 1; // TLS 1.1 TLSv1_1 = 2; // TLS 1.2 TLSv1_2 = 3; // TLS 1.3 TLSv1_3 = 4; } // Minimum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_0`` for // servers. TlsProtocol tls_minimum_protocol_version = 1 [(validate.rules).enum = {defined_only: true}]; // Maximum TLS protocol version. By default, it's ``TLSv1_2`` for clients and ``TLSv1_3`` for // servers. 
TlsProtocol tls_maximum_protocol_version = 2 [(validate.rules).enum = {defined_only: true}]; // If specified, the TLS listener will only support the specified `cipher list // `_ // when negotiating TLS 1.0-1.2 (this setting has no effect when negotiating TLS 1.3). If not // specified, the default list will be used. // // In non-FIPS builds, the default cipher list is: // // .. code-block:: none // // [ECDHE-ECDSA-AES128-GCM-SHA256|ECDHE-ECDSA-CHACHA20-POLY1305] // [ECDHE-RSA-AES128-GCM-SHA256|ECDHE-RSA-CHACHA20-POLY1305] // ECDHE-ECDSA-AES128-SHA // ECDHE-RSA-AES128-SHA // AES128-GCM-SHA256 // AES128-SHA // ECDHE-ECDSA-AES256-GCM-SHA384 // ECDHE-RSA-AES256-GCM-SHA384 // ECDHE-ECDSA-AES256-SHA // ECDHE-RSA-AES256-SHA // AES256-GCM-SHA384 // AES256-SHA // // In builds using :ref:`BoringSSL FIPS `, the default cipher list is: // // .. code-block:: none // // ECDHE-ECDSA-AES128-GCM-SHA256 // ECDHE-RSA-AES128-GCM-SHA256 // ECDHE-ECDSA-AES128-SHA // ECDHE-RSA-AES128-SHA // AES128-GCM-SHA256 // AES128-SHA // ECDHE-ECDSA-AES256-GCM-SHA384 // ECDHE-RSA-AES256-GCM-SHA384 // ECDHE-ECDSA-AES256-SHA // ECDHE-RSA-AES256-SHA // AES256-GCM-SHA384 // AES256-SHA repeated string cipher_suites = 3; // If specified, the TLS connection will only support the specified ECDH // curves. If not specified, the default curves will be used. // // In non-FIPS builds, the default curves are: // // .. code-block:: none // // X25519 // P-256 // // In builds using :ref:`BoringSSL FIPS `, the default curve is: // // .. code-block:: none // // P-256 repeated string ecdh_curves = 4; } // BoringSSL private key method configuration. The private key methods are used for external // (potentially asynchronous) signing and decryption operations. Some use cases for private key // methods would be TPM support and TLS acceleration. 
message PrivateKeyProvider {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.PrivateKeyProvider";

  reserved 2;

  reserved "config";

  // Private key method provider name. The name must match a
  // supported private key method provider type.
  string provider_name = 1 [(validate.rules).string = {min_len: 1}];

  // Private key method provider specific configuration.
  oneof config_type {
    google.protobuf.Any typed_config = 3 [(udpa.annotations.sensitive) = true];
  }
}

// [#next-free-field: 7]
message TlsCertificate {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.TlsCertificate";

  // The TLS certificate chain.
  config.core.v4alpha.DataSource certificate_chain = 1;

  // The TLS private key.
  config.core.v4alpha.DataSource private_key = 2 [(udpa.annotations.sensitive) = true];

  // BoringSSL private key method provider. This is an alternative to :ref:`private_key
  // ` field. This can't be
  // marked as ``oneof`` due to API compatibility reasons. Setting both :ref:`private_key
  // ` and
  // :ref:`private_key_provider
  // ` fields will result in an
  // error.
  // NOTE(review): the :ref: link targets in this file were lost during extraction;
  // restore them from the upstream source before relying on these cross-references.
  PrivateKeyProvider private_key_provider = 6;

  // The password to decrypt the TLS private key. If this field is not set, it is assumed that the
  // TLS private key is not password encrypted.
  config.core.v4alpha.DataSource password = 3 [(udpa.annotations.sensitive) = true];

  // The OCSP response to be stapled with this certificate during the handshake.
  // The response must be DER-encoded and may only be provided via ``filename`` or
  // ``inline_bytes``. The response may pertain to only one certificate.
  config.core.v4alpha.DataSource ocsp_staple = 4;

  // [#not-implemented-hide:]
  repeated config.core.v4alpha.DataSource signed_certificate_timestamp = 5;
}

message TlsSessionTicketKeys {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.TlsSessionTicketKeys";

  // Keys for encrypting and decrypting TLS session tickets. The
  // first key in the array contains the key to encrypt all new sessions created by this context.
  // All keys are candidates for decrypting received tickets. This allows for easy rotation of keys
  // by, for example, putting the new key first, and the previous key second.
  //
  // If :ref:`session_ticket_keys `
  // is not specified, the TLS library will still support resuming sessions via tickets, but it will
  // use an internally-generated and managed key, so sessions cannot be resumed across hot restarts
  // or on different hosts.
  //
  // Each key must contain exactly 80 bytes of cryptographically-secure random data. For
  // example, the output of ``openssl rand 80``.
  //
  // .. attention::
  //
  //   Using this feature has serious security considerations and risks. Improper handling of keys
  //   may result in loss of secrecy in connections, even if ciphers supporting perfect forward
  //   secrecy are used. See https://www.imperialviolet.org/2013/06/27/botchingpfs.html for some
  //   discussion. To minimize the risk, you must:
  //
  //   * Keep the session ticket keys at least as secure as your TLS certificate private keys
  //   * Rotate session ticket keys at least daily, and preferably hourly
  //   * Always generate keys using a cryptographically-secure random data source
  repeated config.core.v4alpha.DataSource keys = 1
      [(validate.rules).repeated = {min_items: 1}, (udpa.annotations.sensitive) = true];
}

// [#next-free-field: 11]
message CertificateValidationContext {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext";

  // Peer certificate verification mode.
  enum TrustChainVerification {
    // Perform default certificate verification (e.g., against CA / verification lists)
    VERIFY_TRUST_CHAIN = 0;

    // Connections where the certificate fails verification will be permitted.
    // For HTTP connections, the result of certificate verification can be used in route matching.
    // (see :ref:`validated ` ).
    ACCEPT_UNTRUSTED = 1;
  }

  reserved 4, 5;

  reserved "verify_subject_alt_name";

  // TLS certificate data containing certificate authority certificates to use in verifying
  // a presented peer certificate (e.g. server certificate for clusters or client certificate
  // for listeners). If not specified and a peer certificate is presented it will not be
  // verified. By default, a client certificate is optional, unless one of the additional
  // options (:ref:`require_client_certificate
  // `,
  // :ref:`verify_certificate_spki
  // `,
  // :ref:`verify_certificate_hash
  // `, or
  // :ref:`match_subject_alt_names
  // `) is also
  // specified.
  //
  // It can optionally contain certificate revocation lists, in which case Envoy will verify
  // that the presented peer certificate has not been revoked by one of the included CRLs. Note
  // that if a CRL is provided for any certificate authority in a trust chain, a CRL must be
  // provided for all certificate authorities in that chain. Failure to do so will result in
  // verification failure for both revoked and unrevoked certificates from that chain.
  //
  // See :ref:`the TLS overview ` for a list of common
  // system CA locations.
  config.core.v4alpha.DataSource trusted_ca = 1;

  // An optional list of base64-encoded SHA-256 hashes. If specified, Envoy will verify that the
  // SHA-256 of the DER-encoded Subject Public Key Information (SPKI) of the presented certificate
  // matches one of the specified values.
  //
  // A base64-encoded SHA-256 of the Subject Public Key Information (SPKI) of the certificate
  // can be generated with the following command:
  //
  // .. code-block:: bash
  //
  //   $ openssl x509 -in path/to/client.crt -noout -pubkey
  //     | openssl pkey -pubin -outform DER
  //     | openssl dgst -sha256 -binary
  //     | openssl enc -base64
  //   NvqYIYSbgK2vCJpQhObf77vv+bQWtc5ek5RIOwPiC9A=
  //
  // This is the format used in HTTP Public Key Pinning.
  //
  // When both:
  // :ref:`verify_certificate_hash
  // ` and
  // :ref:`verify_certificate_spki
  // ` are specified,
  // a hash matching value from either of the lists will result in the certificate being accepted.
  //
  // .. attention::
  //
  //   This option is preferred over :ref:`verify_certificate_hash
  //   `,
  //   because SPKI is tied to a private key, so it doesn't change when the certificate
  //   is renewed using the same private key.
  repeated string verify_certificate_spki = 3
      [(validate.rules).repeated = {items {string {min_len: 44 max_bytes: 44}}}];

  // An optional list of hex-encoded SHA-256 hashes. If specified, Envoy will verify that
  // the SHA-256 of the DER-encoded presented certificate matches one of the specified values.
  //
  // A hex-encoded SHA-256 of the certificate can be generated with the following command:
  //
  // .. code-block:: bash
  //
  //   $ openssl x509 -in path/to/client.crt -outform DER | openssl dgst -sha256 | cut -d" " -f2
  //   df6ff72fe9116521268f6f2dd4966f51df479883fe7037b39f75916ac3049d1a
  //
  // A long hex-encoded and colon-separated SHA-256 (a.k.a. "fingerprint") of the certificate
  // can be generated with the following command:
  //
  // .. code-block:: bash
  //
  //   $ openssl x509 -in path/to/client.crt -noout -fingerprint -sha256 | cut -d"=" -f2
  //   DF:6F:F7:2F:E9:11:65:21:26:8F:6F:2D:D4:96:6F:51:DF:47:98:83:FE:70:37:B3:9F:75:91:6A:C3:04:9D:1A
  //
  // Both of those formats are acceptable.
  //
  // When both:
  // :ref:`verify_certificate_hash
  // ` and
  // :ref:`verify_certificate_spki
  // ` are specified,
  // a hash matching value from either of the lists will result in the certificate being accepted.
  repeated string verify_certificate_hash = 2
      [(validate.rules).repeated = {items {string {min_len: 64 max_bytes: 95}}}];

  // An optional list of Subject Alternative name matchers. Envoy will verify that the
  // Subject Alternative Name of the presented certificate matches one of the specified matches.
  //
  // When a certificate has wildcard DNS SAN entries, to match a specific client, it should be
  // configured with exact match type in the :ref:`string matcher `.
  // For example if the certificate has "\*.example.com" as DNS SAN entry, to allow only "api.example.com",
  // it should be configured as shown below.
  //
  // .. code-block:: yaml
  //
  //  match_subject_alt_names:
  //    exact: "api.example.com"
  //
  // .. attention::
  //
  //   Subject Alternative Names are easily spoofable and verifying only them is insecure,
  //   therefore this option must be used together with :ref:`trusted_ca
  //   `.
  repeated type.matcher.v4alpha.StringMatcher match_subject_alt_names = 9;

  // [#not-implemented-hide:] Must present signed certificate time-stamp.
  google.protobuf.BoolValue require_signed_certificate_timestamp = 6;

  // An optional `certificate revocation list
  // `_
  // (in PEM format). If specified, Envoy will verify that the presented peer
  // certificate has not been revoked by this CRL. If this DataSource contains
  // multiple CRLs, all of them will be used. Note that if a CRL is provided
  // for any certificate authority in a trust chain, a CRL must be provided
  // for all certificate authorities in that chain. Failure to do so will
  // result in verification failure for both revoked and unrevoked certificates
  // from that chain.
  config.core.v4alpha.DataSource crl = 7;

  // If specified, Envoy will not reject expired certificates.
  bool allow_expired_certificate = 8;

  // Certificate trust chain verification mode.
  TrustChainVerification trust_chain_verification = 10
      [(validate.rules).enum = {defined_only: true}];
}

================================================
FILE: api/envoy/extensions/transport_sockets/tls/v4alpha/secret.proto
================================================
syntax = "proto3";

package envoy.extensions.transport_sockets.tls.v4alpha;

import "envoy/config/core/v4alpha/base.proto";
import "envoy/config/core/v4alpha/config_source.proto";
import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto";

import "udpa/core/v1/resource_locator.proto";
import "udpa/annotations/sensitive.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha";
option java_outer_classname = "SecretProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Secrets configuration]

message GenericSecret {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.GenericSecret";

  // Secret of generic type and is available to filters.
  config.core.v4alpha.DataSource secret = 1 [(udpa.annotations.sensitive) = true];
}

message SdsSecretConfig {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.SdsSecretConfig";

  oneof name_specifier {
    // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.
    // When both name and config are specified, then secret can be fetched and/or reloaded via
    // SDS. When only name is specified, then secret will be loaded from static resources.
    string name = 1;

    // Resource locator for SDS. This is mutually exclusive to *name*.
    // [#not-implemented-hide:]
    udpa.core.v1.ResourceLocator sds_resource_locator = 3;
  }

  config.core.v4alpha.ConfigSource sds_config = 2;
}

// [#next-free-field: 6]
message Secret {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.Secret";

  // Name (FQDN, UUID, SPKI, SHA256, etc.) by which the secret can be uniquely referred to.
  string name = 1;

  oneof type {
    TlsCertificate tls_certificate = 2;

    TlsSessionTicketKeys session_ticket_keys = 3;

    CertificateValidationContext validation_context = 4;

    GenericSecret generic_secret = 5;
  }
}

================================================
FILE: api/envoy/extensions/transport_sockets/tls/v4alpha/tls.proto
================================================
syntax = "proto3";

package envoy.extensions.transport_sockets.tls.v4alpha;

import "envoy/config/core/v4alpha/extension.proto";
import "envoy/extensions/transport_sockets/tls/v4alpha/common.proto";
import "envoy/extensions/transport_sockets/tls/v4alpha/secret.proto";

import "google/protobuf/any.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.transport_sockets.tls.v4alpha";
option java_outer_classname = "TlsProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: TLS transport socket]
// [#extension: envoy.transport_sockets.tls]

// The TLS contexts below provide the transport socket configuration for upstream/downstream TLS.
message UpstreamTlsContext {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext";

  // Common TLS context settings.
  //
  // .. attention::
  //
  //   Server certificate verification is not enabled by default. Configure
  //   :ref:`trusted_ca` to enable
  //   verification.
  // NOTE(review): the :ref: link targets in this file were lost during extraction;
  // restore them from the upstream source before relying on these cross-references.
  CommonTlsContext common_tls_context = 1;

  // SNI string to use when creating TLS backend connections.
  string sni = 2 [(validate.rules).string = {max_bytes: 255}];

  // If true, server-initiated TLS renegotiation will be allowed.
  //
  // .. attention::
  //
  //   TLS renegotiation is considered insecure and shouldn't be used unless absolutely necessary.
  bool allow_renegotiation = 3;

  // Maximum number of session keys (Pre-Shared Keys for TLSv1.3+, Session IDs and Session Tickets
  // for TLSv1.2 and older) to store for the purpose of session resumption.
  //
  // Defaults to 1, setting this to 0 disables session resumption.
  google.protobuf.UInt32Value max_session_keys = 4;
}

// [#next-free-field: 9]
message DownstreamTlsContext {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext";

  enum OcspStaplePolicy {
    // OCSP responses are optional. If an OCSP response is absent
    // or expired, the associated certificate will be used for
    // connections without an OCSP staple.
    LENIENT_STAPLING = 0;

    // OCSP responses are optional. If an OCSP response is absent,
    // the associated certificate will be used without an
    // OCSP staple. If a response is provided but is expired,
    // the associated certificate will not be used for
    // subsequent connections. If no suitable certificate is found,
    // the connection is rejected.
    STRICT_STAPLING = 1;

    // OCSP responses are required. Configuration will fail if
    // a certificate is provided without an OCSP response. If a
    // response expires, the associated certificate will not be
    // used for connections. If no suitable certificate is found, the
    // connection is rejected.
    MUST_STAPLE = 2;
  }

  // Common TLS context settings.
  CommonTlsContext common_tls_context = 1;

  // If specified, Envoy will reject connections without a valid client
  // certificate.
  google.protobuf.BoolValue require_client_certificate = 2;

  // If specified, Envoy will reject connections without a valid and matching SNI.
  // [#not-implemented-hide:]
  google.protobuf.BoolValue require_sni = 3;

  oneof session_ticket_keys_type {
    // TLS session ticket key settings.
    TlsSessionTicketKeys session_ticket_keys = 4;

    // Config for fetching TLS session ticket keys via SDS API.
    SdsSecretConfig session_ticket_keys_sds_secret_config = 5;

    // Config for controlling stateless TLS session resumption: setting this to true will cause the TLS
    // server to not issue TLS session tickets for the purposes of stateless TLS session resumption.
    // If set to false, the TLS server will issue TLS session tickets and encrypt/decrypt them using
    // the keys specified through either :ref:`session_ticket_keys `
    // or :ref:`session_ticket_keys_sds_secret_config `.
    // If this config is set to false and no keys are explicitly configured, the TLS server will issue
    // TLS session tickets and encrypt/decrypt them using an internally-generated and managed key, with the
    // implication that sessions cannot be resumed across hot restarts or on different hosts.
    bool disable_stateless_session_resumption = 7;
  }

  // If specified, session_timeout will change maximum lifetime (in seconds) of TLS session
  // Currently this value is used as a hint to `TLS session ticket lifetime (for TLSv1.2)
  // `
  // only seconds could be specified (fractional seconds are going to be ignored).
  google.protobuf.Duration session_timeout = 6 [(validate.rules).duration = {
    lt {seconds: 4294967296}
    gte {}
  }];

  // Config for whether to use certificates if they do not have
  // an accompanying OCSP response or if the response expires at runtime.
  // Defaults to LENIENT_STAPLING
  OcspStaplePolicy ocsp_staple_policy = 8 [(validate.rules).enum = {defined_only: true}];
}

// TLS context shared by both client and server TLS contexts.
// [#next-free-field: 14]
message CommonTlsContext {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext";

  // Config for Certificate provider to get certificates. This provider should allow certificates to be
  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.
  message CertificateProvider {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProvider";

    // opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify
    // a root-certificate (validation context) or "TLS" to specify a new tls-certificate.
    string name = 1 [(validate.rules).string = {min_len: 1}];

    // Provider specific config.
    // Note: an implementation is expected to dedup multiple instances of the same config
    // to maintain a single certificate-provider instance. The sharing can happen, for
    // example, among multiple clusters or between the tls_certificate and validation_context
    // certificate providers of a cluster.
    // This config could be supplied inline or (in future) a named xDS resource.
    oneof config {
      option (validate.required) = true;

      config.core.v4alpha.TypedExtensionConfig typed_config = 2;
    }
  }

  // Similar to CertificateProvider above, but allows the provider instances to be configured on
  // the client side instead of being sent from the control plane.
  message CertificateProviderInstance {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext.CertificateProviderInstance";

    // Provider instance name. This name must be defined in the client's configuration (e.g., a
    // bootstrap file) to correspond to a provider instance (i.e., the same data in the typed_config
    // field that would be sent in the CertificateProvider message if the config was sent by the
    // control plane). If not present, defaults to "default".
    //
    // Instance names should generally be defined not in terms of the underlying provider
    // implementation (e.g., "file_watcher") but rather in terms of the function of the
    // certificates (e.g., "foo_deployment_identity").
    string instance_name = 1;

    // Opaque name used to specify certificate instances or types. For example, "ROOTCA" to specify
    // a root-certificate (validation context) or "example.com" to specify a certificate for a
    // particular domain. Not all provider instances will actually use this field, so the value
    // defaults to the empty string.
    string certificate_name = 2;
  }

  message CombinedCertificateValidationContext {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.extensions.transport_sockets.tls.v3.CommonTlsContext."
        "CombinedCertificateValidationContext";

    // How to validate peer certificates.
    CertificateValidationContext default_validation_context = 1
        [(validate.rules).message = {required: true}];

    oneof dynamic_validation_context {
      // Config for fetching validation context via SDS API. Note SDS API allows certificates to be
      // fetched/refreshed over the network asynchronously with respect to the TLS handshake.
      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,
      // or validation_context_certificate_provider_instance may be used.
      SdsSecretConfig validation_context_sds_secret_config = 2
          [(validate.rules).message = {required: true}];

      // Certificate provider for fetching validation context.
      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,
      // or validation_context_certificate_provider_instance may be used.
      // [#not-implemented-hide:]
      CertificateProvider validation_context_certificate_provider = 3;

      // Certificate provider instance for fetching validation context.
      // Only one of validation_context_sds_secret_config, validation_context_certificate_provider,
      // or validation_context_certificate_provider_instance may be used.
      // [#not-implemented-hide:]
      CertificateProviderInstance validation_context_certificate_provider_instance = 4;
    }
  }

  reserved 5;

  // TLS protocol versions, cipher suites etc.
  TlsParameters tls_params = 1;

  // :ref:`Multiple TLS certificates ` can be associated with the
  // same context to allow both RSA and ECDSA certificates.
  //
  // Only a single TLS certificate is supported in client contexts. In server contexts, the first
  // RSA certificate is used for clients that only support RSA and the first ECDSA certificate is
  // used for clients that support ECDSA.
  repeated TlsCertificate tls_certificates = 2;

  // Configs for fetching TLS certificates via SDS API. Note SDS API allows certificates to be
  // fetched/refreshed over the network asynchronously with respect to the TLS handshake.
  repeated SdsSecretConfig tls_certificate_sds_secret_configs = 6
      [(validate.rules).repeated = {max_items: 1}];

  // Certificate provider for fetching TLS certificates.
  // [#not-implemented-hide:]
  CertificateProvider tls_certificate_certificate_provider = 9;

  // Certificate provider instance for fetching TLS certificates.
  // [#not-implemented-hide:]
  CertificateProviderInstance tls_certificate_certificate_provider_instance = 11;

  oneof validation_context_type {
    // How to validate peer certificates.
    CertificateValidationContext validation_context = 3;

    // Config for fetching validation context via SDS API. Note SDS API allows certificates to be
    // fetched/refreshed over the network asynchronously with respect to the TLS handshake.
    SdsSecretConfig validation_context_sds_secret_config = 7;

    // Combined certificate validation context holds a default CertificateValidationContext
    // and SDS config. When SDS server returns dynamic CertificateValidationContext, both dynamic
    // and default CertificateValidationContext are merged into a new CertificateValidationContext
    // for validation. This merge is done by Message::MergeFrom(), so dynamic
    // CertificateValidationContext overwrites singular fields in default
    // CertificateValidationContext, and concatenates repeated fields to default
    // CertificateValidationContext, and logical OR is applied to boolean fields.
    CombinedCertificateValidationContext combined_validation_context = 8;

    // Certificate provider for fetching validation context.
    // [#not-implemented-hide:]
    CertificateProvider validation_context_certificate_provider = 10;

    // Certificate provider instance for fetching validation context.
    // [#not-implemented-hide:]
    CertificateProviderInstance validation_context_certificate_provider_instance = 12;
  }

  // Supplies the list of ALPN protocols that the listener should expose. In
  // practice this is likely to be set to one of two values (see the
  // :ref:`codec_type
  // `
  // parameter in the HTTP connection manager for more information):
  //
  // * "h2,http/1.1" If the listener is going to support both HTTP/2 and HTTP/1.1.
  // * "http/1.1" If the listener is only going to support HTTP/1.1.
  //
  // There is no default for this parameter. If empty, Envoy will not expose ALPN.
  repeated string alpn_protocols = 4;

  // Custom TLS handshaker. If empty, defaults to native TLS handshaking
  // behavior.
  config.core.v4alpha.TypedExtensionConfig custom_handshaker = 13;
}

================================================
FILE: api/envoy/extensions/upstreams/http/generic/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/upstreams/http/generic/v3/generic_connection_pool.proto
================================================
syntax = "proto3";

package envoy.extensions.upstreams.http.generic.v3;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.generic.v3";
option java_outer_classname = "GenericConnectionPoolProtoOuterClass";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Generic Connection Pool]

// A connection pool which forwards downstream HTTP as TCP or HTTP to upstream,
// based on CONNECT configuration.
// [#extension: envoy.upstreams.http.generic]
message GenericConnectionPoolProto {
}

================================================
FILE: api/envoy/extensions/upstreams/http/http/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/upstreams/http/http/v3/http_connection_pool.proto
================================================
syntax = "proto3";

package envoy.extensions.upstreams.http.http.v3;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.http.v3";
option java_outer_classname = "HttpConnectionPoolProtoOuterClass";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Http Connection Pool]

// A connection pool which forwards downstream HTTP as HTTP to upstream.
// [#extension: envoy.upstreams.http.http]
message HttpConnectionPoolProto {
}

================================================
FILE: api/envoy/extensions/upstreams/http/tcp/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/upstreams/http/tcp/v3/tcp_connection_pool.proto
================================================
syntax = "proto3";

package envoy.extensions.upstreams.http.tcp.v3;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.extensions.upstreams.http.tcp.v3";
option java_outer_classname = "TcpConnectionPoolProtoOuterClass";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Tcp Connection Pool]

// A connection pool which forwards downstream HTTP as TCP to upstream.
// [#extension: envoy.upstreams.http.tcp]
message TcpConnectionPoolProto {
}

================================================
FILE: api/envoy/extensions/wasm/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = [
        "//envoy/config/core/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/extensions/wasm/v3/wasm.proto
================================================
syntax = "proto3";

package envoy.extensions.wasm.v3;

import "envoy/config/core/v3/base.proto";

import "google/protobuf/any.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.wasm.v3";
option java_outer_classname = "WasmProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Wasm]
// [#extension: envoy.bootstrap.wasm]

// Configuration for a Wasm VM.
// [#next-free-field: 7]
message VmConfig {
  // An ID which will be used along with a hash of the wasm code (or the name of the registered Null
  // VM plugin) to determine which VM will be used for the plugin. All plugins which use the same
  // *vm_id* and code will use the same VM. May be left blank. Sharing a VM between plugins can
  // reduce memory utilization and make sharing of data easier which may have security implications.
  // See ref: "TODO: add ref" for details.
  string vm_id = 1;

  // The Wasm runtime type (either "v8" or "null" for code compiled into Envoy).
  string runtime = 2 [(validate.rules).string = {min_len: 1}];

  // The Wasm code that Envoy will execute.
  config.core.v3.AsyncDataSource code = 3;

  // The Wasm configuration used in initialization of a new VM
  // (proxy_on_start). `google.protobuf.Struct` is serialized as JSON before
  // passing it to the plugin. `google.protobuf.BytesValue` and
  // `google.protobuf.StringValue` are passed directly without the wrapper.
  google.protobuf.Any configuration = 4;

  // Allow the wasm file to include pre-compiled code on VMs which support it.
  // Warning: this should only be enabled for trusted sources as the precompiled code is not
  // verified.
  bool allow_precompiled = 5;

  // If true and the code needs to be remotely fetched and it is not in the cache then NACK the configuration
  // update and do a background fetch to fill the cache, otherwise fetch the code asynchronously and enter
  // warming state.
  bool nack_on_code_cache_miss = 6;
}

// Base Configuration for Wasm Plugins e.g. filters and services.
// [#next-free-field: 6]
message PluginConfig {
  // A unique name for a filters/services in a VM for use in identifying the filter/service if
  // multiple filters/services are handled by the same *vm_id* and *root_id* and for
  // logging/debugging.
  string name = 1;

  // A unique ID for a set of filters/services in a VM which will share a RootContext and Contexts
  // if applicable (e.g. an Wasm HttpFilter and an Wasm AccessLog). If left blank, all
  // filters/services with a blank root_id with the same *vm_id* will share Context(s).
  string root_id = 2;

  // Configuration for finding or starting VM.
  oneof vm {
    VmConfig vm_config = 3;
    // TODO: add referential VM configurations.
  }

  // Filter/service configuration used to configure or reconfigure a plugin
  // (proxy_on_configuration).
  // `google.protobuf.Struct` is serialized as JSON before
  // passing it to the plugin. `google.protobuf.BytesValue` and
  // `google.protobuf.StringValue` are passed directly without the wrapper.
  google.protobuf.Any configuration = 4;

  // If there is a fatal error on the VM (e.g. exception, abort(), on_start or on_configure return false),
  // then all plugins associated with the VM will either fail closed (by default), e.g. by returning an HTTP 503 error,
  // or fail open (if 'fail_open' is set to true) by bypassing the filter. Note: when on_start or on_configure return false
  // during xDS updates the xDS configuration will be rejected and when on_start or on_configuration return false on initial
  // startup the proxy will not start.
  bool fail_open = 5;
}

// WasmService is configured as a built-in *envoy.wasm_service* :ref:`WasmService
// ` This opaque configuration will be used to create a Wasm Service.
// NOTE(review): the :ref: link target above was lost during extraction; restore it
// from the upstream source before relying on this cross-reference.
message WasmService {
  // General plugin configuration.
  PluginConfig config = 1;

  // If true, create a single VM rather than creating one VM per worker. Such a singleton can
  // not be used with filters.
  bool singleton = 2;
}

================================================
FILE: api/envoy/extensions/watchdog/abort_action/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/watchdog/abort_action/v3alpha/abort_action.proto
================================================
syntax = "proto3";

package envoy.extensions.watchdog.abort_action.v3alpha;

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.watchdog.abort_action.v3alpha";
option java_outer_classname = "AbortActionProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Watchdog Action that sends a SIGABRT to kill the process.]
// [#extension: envoy.watchdog.abort_action]

// A GuardDogAction that will terminate the process by sending SIGABRT to the
// stuck thread. This would allow easier access to the call stack of the stuck
// thread since we would run signal handlers on that thread. This would be
// more useful than the default watchdog kill behaviors since those PANIC
// from the watchdog's thread.
// This is currently only implemented for systems that support kill to send
// signals.
message AbortActionConfig {
  // How long to wait for the thread to respond to the SIGABRT before killing the
  // process from this action. This is a blocking action.
  google.protobuf.Duration wait_duration = 1;
}

================================================
FILE: api/envoy/extensions/watchdog/profile_action/v3alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"],
)

================================================
FILE: api/envoy/extensions/watchdog/profile_action/v3alpha/profile_action.proto
================================================
syntax = "proto3";

package envoy.extensions.watchdog.profile_action.v3alpha;

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.extensions.watchdog.profile_action.v3alpha";
option java_outer_classname = "ProfileActionProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).work_in_progress = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Watchdog Action that does CPU profiling.]
// [#extension: envoy.watchdog.profile_action]

// Configuration for the profile watchdog action.
message ProfileActionConfig {
  // How long the profile should last. If not set defaults to 5 seconds.
  google.protobuf.Duration profile_duration = 1;

  // File path to the directory to output profiles.
  string profile_path = 2 [(validate.rules).string = {min_len: 1}];

  // Limits the max number of profiles that can be generated by this action
  // over its lifetime to avoid filling the disk.
  // If not set (i.e. it's 0), a default of 10 will be used.
  uint64 max_profiles = 3;
}

================================================
FILE: api/envoy/service/README.md
================================================
Protocol buffer definitions for gRPC and REST services.

Visibility should be constrained to none (default).

================================================
FILE: api/envoy/service/accesslog/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    has_services = True,
    deps = [
        "//envoy/api/v2/core:pkg",
        "//envoy/data/accesslog/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/service/accesslog/v2/als.proto
================================================
syntax = "proto3";

package envoy.service.accesslog.v2;

import "envoy/api/v2/core/base.proto";
import "envoy/data/accesslog/v2/accesslog.proto";

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.service.accesslog.v2";
option java_outer_classname = "AlsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: gRPC Access Log Service (ALS)]

// Service for streaming access logs from Envoy to an access log server.
service AccessLogService {
  // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any
  // response to be sent as nothing would be done in the case of failure. The server should
  // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different
  // API for "critical" access logs in which Envoy will buffer access logs for some period of time
  // until it gets an ACK so it could then retry. This API is designed for high throughput with the
  // expectation that it might be lossy.
  rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) {
  }
}

// Empty response for the StreamAccessLogs API. Will never be sent. See below.
message StreamAccessLogsResponse {
}

// Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream
// access logs without ever expecting a response.
message StreamAccessLogsMessage {
  message Identifier {
    // The node sending the access log messages over the stream.
    api.v2.core.Node node = 1 [(validate.rules).message = {required: true}];

    // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig
    // `.
    string log_name = 2 [(validate.rules).string = {min_bytes: 1}];
  }

  // Wrapper for batches of HTTP access log entries.
  message HTTPAccessLogEntries {
    repeated data.accesslog.v2.HTTPAccessLogEntry log_entry = 1
        [(validate.rules).repeated = {min_items: 1}];
  }

  // Wrapper for batches of TCP access log entries.
  message TCPAccessLogEntries {
    repeated data.accesslog.v2.TCPAccessLogEntry log_entry = 1
        [(validate.rules).repeated = {min_items: 1}];
  }

  // Identifier data that will only be sent in the first message on the stream. This is effectively
  // structured metadata and is a performance optimization.
  Identifier identifier = 1;

  // Batches of log entries of a single type. Generally speaking, a given stream should only
  // ever include one type of log entry.
oneof log_entries { option (validate.required) = true; HTTPAccessLogEntries http_logs = 2; TCPAccessLogEntries tcp_logs = 3; } } ================================================ FILE: api/envoy/service/accesslog/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", "//envoy/data/accesslog/v3:pkg", "//envoy/service/accesslog/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/accesslog/v3/als.proto ================================================ syntax = "proto3"; package envoy.service.accesslog.v3; import "envoy/config/core/v3/base.proto"; import "envoy/data/accesslog/v3/accesslog.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.accesslog.v3"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC Access Log Service (ALS)] // Service for streaming access logs from Envoy to an access log server. service AccessLogService { // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any // response to be sent as nothing would be done in the case of failure. The server should // disconnect if it expects Envoy to reconnect. In the future we may decide to add a different // API for "critical" access logs in which Envoy will buffer access logs for some period of time // until it gets an ACK so it could then retry. This API is designed for high throughput with the // expectation that it might be lossy. 
rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { } } // Empty response for the StreamAccessLogs API. Will never be sent. See below. message StreamAccessLogsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v2.StreamAccessLogsResponse"; } // Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream // access logs without ever expecting a response. message StreamAccessLogsMessage { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v2.StreamAccessLogsMessage"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v2.StreamAccessLogsMessage.Identifier"; // The node sending the access log messages over the stream. config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. string log_name = 2 [(validate.rules).string = {min_len: 1}]; } // Wrapper for batches of HTTP access log entries. message HTTPAccessLogEntries { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v2.StreamAccessLogsMessage.HTTPAccessLogEntries"; repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1 [(validate.rules).repeated = {min_items: 1}]; } // Wrapper for batches of TCP access log entries. message TCPAccessLogEntries { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v2.StreamAccessLogsMessage.TCPAccessLogEntries"; repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1 [(validate.rules).repeated = {min_items: 1}]; } // Identifier data that will only be sent in the first message on the stream. This is effectively // structured metadata and is a performance optimization. Identifier identifier = 1; // Batches of log entries of a single type. 
Generally speaking, a given stream should only // ever include one type of log entry. oneof log_entries { option (validate.required) = true; HTTPAccessLogEntries http_logs = 2; TCPAccessLogEntries tcp_logs = 3; } } ================================================ FILE: api/envoy/service/accesslog/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/data/accesslog/v3:pkg", "//envoy/service/accesslog/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/accesslog/v4alpha/als.proto ================================================ syntax = "proto3"; package envoy.service.accesslog.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/data/accesslog/v3/accesslog.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.accesslog.v4alpha"; option java_outer_classname = "AlsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: gRPC Access Log Service (ALS)] // Service for streaming access logs from Envoy to an access log server. service AccessLogService { // Envoy will connect and send StreamAccessLogsMessage messages forever. It does not expect any // response to be sent as nothing would be done in the case of failure. The server should // disconnect if it expects Envoy to reconnect. 
In the future we may decide to add a different // API for "critical" access logs in which Envoy will buffer access logs for some period of time // until it gets an ACK so it could then retry. This API is designed for high throughput with the // expectation that it might be lossy. rpc StreamAccessLogs(stream StreamAccessLogsMessage) returns (StreamAccessLogsResponse) { } } // Empty response for the StreamAccessLogs API. Will never be sent. See below. message StreamAccessLogsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v3.StreamAccessLogsResponse"; } // Stream message for the StreamAccessLogs API. Envoy will open a stream to the server and stream // access logs without ever expecting a response. message StreamAccessLogsMessage { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v3.StreamAccessLogsMessage"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v3.StreamAccessLogsMessage.Identifier"; // The node sending the access log messages over the stream. config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; // The friendly name of the log configured in :ref:`CommonGrpcAccessLogConfig // `. string log_name = 2 [(validate.rules).string = {min_len: 1}]; } // Wrapper for batches of HTTP access log entries. message HTTPAccessLogEntries { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v3.StreamAccessLogsMessage.HTTPAccessLogEntries"; repeated data.accesslog.v3.HTTPAccessLogEntry log_entry = 1 [(validate.rules).repeated = {min_items: 1}]; } // Wrapper for batches of TCP access log entries. 
message TCPAccessLogEntries { option (udpa.annotations.versioning).previous_message_type = "envoy.service.accesslog.v3.StreamAccessLogsMessage.TCPAccessLogEntries"; repeated data.accesslog.v3.TCPAccessLogEntry log_entry = 1 [(validate.rules).repeated = {min_items: 1}]; } // Identifier data that will only be sent in the first message on the stream. This is effectively // structured metadata and is a performance optimization. Identifier identifier = 1; // Batches of log entries of a single type. Generally speaking, a given stream should only // ever include one type of log entry. oneof log_entries { option (validate.required) = true; HTTPAccessLogEntries http_logs = 2; TCPAccessLogEntries tcp_logs = 3; } } ================================================ FILE: api/envoy/service/auth/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/auth/v2/attribute_context.proto ================================================ syntax = "proto3"; package envoy.service.auth.v2; import "envoy/api/v2/core/address.proto"; import "envoy/api/v2/core/base.proto"; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v2"; option java_outer_classname = "AttributeContextProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Attribute Context ] // See :ref:`network filter configuration overview ` // and :ref:`HTTP filter configuration overview `. // An attribute is a piece of metadata that describes an activity on a network. 
// For example, the size of an HTTP request, or the status code of an HTTP response. // // Each attribute has a type and a name, which is logically defined as a proto message field // of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes // supported by Envoy authorization system. // [#comment: The following items are left out of this proto // Request.Auth field for jwt tokens // Request.Api for api management // Origin peer that originated the request // Caching Protocol // request_context return values to inject back into the filter chain // peer.claims -- from X.509 extensions // Configuration // - field mask to send // - which return values from request_context are copied back // - which return values are copied into request_headers] // [#next-free-field: 12] message AttributeContext { // This message defines attributes for a node that handles a network request. // The node can be either a service or an application that sends, forwards, // or receives the request. Service peers should fill in the `service`, // `principal`, and `labels` as appropriate. // [#next-free-field: 6] message Peer { // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. api.v2.core.Address address = 1; // The canonical service name of the peer. // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster // ` // If a more trusted source of the service name is available through mTLS/secure naming, it // should be used. string service = 2; // The labels associated with the peer. // These could be pod labels for Kubernetes or tags for VMs. // The source of the labels could be an X.509 certificate or other configuration. map labels = 3; // The authenticated identity of this peer. // For example, the identity associated with the workload such as a service account. 
// If an X.509 certificate is used to assert the identity this field should be sourced from // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. // The primary identity should be the principal. The principal format is issuer specific. // // Example: // * SPIFFE format is `spiffe://trust-domain/path` // * Google account format is `https://accounts.google.com/{userid}` string principal = 4; // The X.509 certificate used to authenticate the identify of this peer. // When present, the certificate contents are encoded in URL and PEM format. string certificate = 5; } // Represents a network request, such as an HTTP request. message Request { // The timestamp when the proxy receives the first byte of the request. google.protobuf.Timestamp time = 1; // Represents an HTTP request or an HTTP-like request. HttpRequest http = 2; } // This message defines attributes for an HTTP request. // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. // [#next-free-field: 12] message HttpRequest { // The unique ID for a request, which can be propagated to downstream // systems. The ID should have low probability of collision // within a single day for a specific service. // For HTTP requests, it should be X-Request-ID or equivalent. string id = 1; // The HTTP request method, such as `GET`, `POST`. string method = 2; // The HTTP request headers. If multiple headers share the same key, they // must be merged according to the HTTP spec. All header keys must be // lower-cased, because HTTP header keys are case-insensitive. map headers = 3; // The request target, as it appears in the first line of the HTTP request. This includes // the URL path and query-string. No decoding is performed. string path = 4; // The HTTP request `Host` or 'Authority` header value. string host = 5; // The HTTP URL scheme, such as `http` and `https`. string scheme = 6; // This field is always empty, and exists for compatibility reasons. 
The HTTP URL query is // included in `path` field. string query = 7; // This field is always empty, and exists for compatibility reasons. The URL fragment is // not submitted as part of HTTP requests; it is unknowable. string fragment = 8; // The HTTP request size in bytes. If unknown, it must be -1. int64 size = 9; // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". // // See :repo:`headers.h:ProtocolStrings ` for a list of all // possible values. string protocol = 10; // The HTTP request body. string body = 11; } // The source of a network activity, such as starting a TCP connection. // In a multi hop network activity, the source represents the sender of the // last hop. Peer source = 1; // The destination of a network activity, such as accepting a TCP connection. // In a multi hop network activity, the destination represents the receiver of // the last hop. Peer destination = 2; // Represents a network request, such as an HTTP request. Request request = 4; // This is analogous to http_request.headers, however these contents will not be sent to the // upstream server. Context_extensions provide an extension mechanism for sending additional // information to the auth server without modifying the proto definition. It maps to the // internal opaque context in the filter chain. map context_extensions = 10; // Dynamic metadata associated with the request. 
api.v2.core.Metadata metadata_context = 11; } ================================================ FILE: api/envoy/service/auth/v2/external_auth.proto ================================================ syntax = "proto3"; package envoy.service.auth.v2; import "envoy/api/v2/core/base.proto"; import "envoy/service/auth/v2/attribute_context.proto"; import "envoy/type/http_status.proto"; import "google/rpc/status.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v2"; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Authorization Service ] // The authorization service request messages used by external authorization :ref:`network filter // ` and :ref:`HTTP filter `. // A generic interface for performing authorization check on incoming // requests to a networked service. service Authorization { // Performs authorization check based on the attributes associated with the // incoming request, and returns status `OK` or not `OK`. rpc Check(CheckRequest) returns (CheckResponse) { } } message CheckRequest { // The request attributes. AttributeContext attributes = 1; } // HTTP attributes for a denied response. message DeniedHttpResponse { // This field allows the authorization service to send a HTTP response status // code to the downstream client other than 403 (Forbidden). type.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to // false when used in this message. repeated api.v2.core.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data // to the downstream client. 
string body = 3; } // HTTP attributes for an ok response. message OkHttpResponse { // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to // false when used in this message. By setting the `append` field to `true`, // the filter will append the correspondent header value to the matched request header. // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated api.v2.core.HeaderValueOption headers = 2; } // Intended for gRPC and Network Authorization servers `only`. message CheckResponse { // Status `OK` allows the request. Any other status indicates the request should be denied. google.rpc.Status status = 1; // An message that contains HTTP response attributes. This message is // used when the authorization service needs to send custom responses to the // downstream client or, to modify/add request headers being dispatched to the upstream. oneof http_response { // Supplies http attributes for a denied response. DeniedHttpResponse denied_response = 2; // Supplies http attributes for an ok response. OkHttpResponse ok_response = 3; } } ================================================ FILE: api/envoy/service/auth/v2alpha/BUILD ================================================ load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
api_proto_package( has_services = True, deps = ["//envoy/service/auth/v2:pkg"], ) ================================================ FILE: api/envoy/service/auth/v2alpha/external_auth.proto ================================================ syntax = "proto3"; package envoy.service.auth.v2alpha; option java_multiple_files = true; option java_generic_services = true; option java_outer_classname = "CertsProto"; option java_package = "io.envoyproxy.envoy.service.auth.v2alpha"; import "envoy/service/auth/v2/external_auth.proto"; // [#protodoc-title: Authorization Service ] // The authorization service request messages used by external authorization :ref:`network filter // ` and :ref:`HTTP filter `. // A generic interface for performing authorization check on incoming // requests to a networked service. service Authorization { // Performs authorization check based on the attributes associated with the // incoming request, and returns status `OK` or not `OK`. rpc Check(v2.CheckRequest) returns (v2.CheckResponse); } ================================================ FILE: api/envoy/service/auth/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", "//envoy/service/auth/v2:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/auth/v3/attribute_context.proto ================================================ syntax = "proto3"; package envoy.service.auth.v3; import "envoy/config/core/v3/address.proto"; import "envoy/config/core/v3/base.proto"; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v3"; option java_outer_classname = "AttributeContextProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Attribute Context ] // See :ref:`network filter configuration overview ` // and :ref:`HTTP filter configuration overview `. // An attribute is a piece of metadata that describes an activity on a network. // For example, the size of an HTTP request, or the status code of an HTTP response. // // Each attribute has a type and a name, which is logically defined as a proto message field // of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes // supported by Envoy authorization system. 
// [#comment: The following items are left out of this proto // Request.Auth field for jwt tokens // Request.Api for api management // Origin peer that originated the request // Caching Protocol // request_context return values to inject back into the filter chain // peer.claims -- from X.509 extensions // Configuration // - field mask to send // - which return values from request_context are copied back // - which return values are copied into request_headers] // [#next-free-field: 12] message AttributeContext { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.AttributeContext"; // This message defines attributes for a node that handles a network request. // The node can be either a service or an application that sends, forwards, // or receives the request. Service peers should fill in the `service`, // `principal`, and `labels` as appropriate. // [#next-free-field: 6] message Peer { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.AttributeContext.Peer"; // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. config.core.v3.Address address = 1; // The canonical service name of the peer. // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster // ` // If a more trusted source of the service name is available through mTLS/secure naming, it // should be used. string service = 2; // The labels associated with the peer. // These could be pod labels for Kubernetes or tags for VMs. // The source of the labels could be an X.509 certificate or other configuration. map labels = 3; // The authenticated identity of this peer. // For example, the identity associated with the workload such as a service account. // If an X.509 certificate is used to assert the identity this field should be sourced from // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. // The primary identity should be the principal. 
The principal format is issuer specific. // // Example: // * SPIFFE format is `spiffe://trust-domain/path` // * Google account format is `https://accounts.google.com/{userid}` string principal = 4; // The X.509 certificate used to authenticate the identify of this peer. // When present, the certificate contents are encoded in URL and PEM format. string certificate = 5; } // Represents a network request, such as an HTTP request. message Request { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.AttributeContext.Request"; // The timestamp when the proxy receives the first byte of the request. google.protobuf.Timestamp time = 1; // Represents an HTTP request or an HTTP-like request. HttpRequest http = 2; } // This message defines attributes for an HTTP request. // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. // [#next-free-field: 13] message HttpRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.AttributeContext.HttpRequest"; // The unique ID for a request, which can be propagated to downstream // systems. The ID should have low probability of collision // within a single day for a specific service. // For HTTP requests, it should be X-Request-ID or equivalent. string id = 1; // The HTTP request method, such as `GET`, `POST`. string method = 2; // The HTTP request headers. If multiple headers share the same key, they // must be merged according to the HTTP spec. All header keys must be // lower-cased, because HTTP header keys are case-insensitive. map headers = 3; // The request target, as it appears in the first line of the HTTP request. This includes // the URL path and query-string. No decoding is performed. string path = 4; // The HTTP request `Host` or 'Authority` header value. string host = 5; // The HTTP URL scheme, such as `http` and `https`. string scheme = 6; // This field is always empty, and exists for compatibility reasons. 
The HTTP URL query is // included in `path` field. string query = 7; // This field is always empty, and exists for compatibility reasons. The URL fragment is // not submitted as part of HTTP requests; it is unknowable. string fragment = 8; // The HTTP request size in bytes. If unknown, it must be -1. int64 size = 9; // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". // // See :repo:`headers.h:ProtocolStrings ` for a list of all // possible values. string protocol = 10; // The HTTP request body. string body = 11; // The HTTP request body in bytes. This is used instead of // :ref:`body ` when // :ref:`pack_as_bytes ` // is set to true. bytes raw_body = 12; } // The source of a network activity, such as starting a TCP connection. // In a multi hop network activity, the source represents the sender of the // last hop. Peer source = 1; // The destination of a network activity, such as accepting a TCP connection. // In a multi hop network activity, the destination represents the receiver of // the last hop. Peer destination = 2; // Represents a network request, such as an HTTP request. Request request = 4; // This is analogous to http_request.headers, however these contents will not be sent to the // upstream server. Context_extensions provide an extension mechanism for sending additional // information to the auth server without modifying the proto definition. It maps to the // internal opaque context in the filter chain. map context_extensions = 10; // Dynamic metadata associated with the request. 
config.core.v3.Metadata metadata_context = 11; } ================================================ FILE: api/envoy/service/auth/v3/external_auth.proto ================================================ syntax = "proto3"; package envoy.service.auth.v3; import "envoy/config/core/v3/base.proto"; import "envoy/service/auth/v3/attribute_context.proto"; import "envoy/type/v3/http_status.proto"; import "google/protobuf/struct.proto"; import "google/rpc/status.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v3"; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Authorization Service ] // The authorization service request messages used by external authorization :ref:`network filter // ` and :ref:`HTTP filter `. // A generic interface for performing authorization check on incoming // requests to a networked service. service Authorization { // Performs authorization check based on the attributes associated with the // incoming request, and returns status `OK` or not `OK`. rpc Check(CheckRequest) returns (CheckResponse) { } } message CheckRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.CheckRequest"; // The request attributes. AttributeContext attributes = 1; } // HTTP attributes for a denied response. message DeniedHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.DeniedHttpResponse"; // This field allows the authorization service to send a HTTP response status // code to the downstream client other than 403 (Forbidden). 
type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to // false when used in this message. repeated config.core.v3.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data // to the downstream client. string body = 3; } // HTTP attributes for an OK response. // [#next-free-field: 6] message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.OkHttpResponse"; // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to // false when used in this message. By setting the `append` field to `true`, // the filter will append the correspondent header value to the matched request header. // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v3.HeaderValueOption headers = 2; // HTTP entity headers to remove from the original request before dispatching // it to the upstream. This allows the authorization service to act on auth // related headers (like `Authorization`), process them, and consume them. // Under this model, the upstream will either receive the request (if it's // authorized) or not receive it (if it's not), but will not see headers // containing authorization credentials. // // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as // the header `Host`, may not be removed as that would make the request // malformed. If mentioned in `headers_to_remove` these special headers will // be ignored. 
// // When using the HTTP service this must instead be set by the HTTP // authorization service as a comma separated list like so: // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. repeated string headers_to_remove = 5; // This field has been deprecated in favor of :ref:`CheckResponse.dynamic_metadata // `. Until it is removed, // setting this field overrides :ref:`CheckResponse.dynamic_metadata // `. google.protobuf.Struct dynamic_metadata = 3 [deprecated = true]; } // Intended for gRPC and Network Authorization servers `only`. message CheckResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v2.CheckResponse"; // Status `OK` allows the request. Any other status indicates the request should be denied. google.rpc.Status status = 1; // An message that contains HTTP response attributes. This message is // used when the authorization service needs to send custom responses to the // downstream client or, to modify/add request headers being dispatched to the upstream. oneof http_response { // Supplies http attributes for a denied response. DeniedHttpResponse denied_response = 2; // Supplies http attributes for an ok response. OkHttpResponse ok_response = 3; } // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next // filter. This metadata lives in a namespace specified by the canonical name of extension filter // that requires it: // // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. // - :ref:`envoy.filters.network.ext_authz ` for network filter. google.protobuf.Struct dynamic_metadata = 4; } ================================================ FILE: api/envoy/service/auth/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/service/auth/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/auth/v4alpha/attribute_context.proto ================================================ syntax = "proto3"; package envoy.service.auth.v4alpha; import "envoy/config/core/v4alpha/address.proto"; import "envoy/config/core/v4alpha/base.proto"; import "google/protobuf/timestamp.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v4alpha"; option java_outer_classname = "AttributeContextProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Attribute Context ] // See :ref:`network filter configuration overview ` // and :ref:`HTTP filter configuration overview `. // An attribute is a piece of metadata that describes an activity on a network. // For example, the size of an HTTP request, or the status code of an HTTP response. // // Each attribute has a type and a name, which is logically defined as a proto message field // of the `AttributeContext`. The `AttributeContext` is a collection of individual attributes // supported by Envoy authorization system. 
// [#comment: The following items are left out of this proto // Request.Auth field for jwt tokens // Request.Api for api management // Origin peer that originated the request // Caching Protocol // request_context return values to inject back into the filter chain // peer.claims -- from X.509 extensions // Configuration // - field mask to send // - which return values from request_context are copied back // - which return values are copied into request_headers] // [#next-free-field: 12] message AttributeContext { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.AttributeContext"; // This message defines attributes for a node that handles a network request. // The node can be either a service or an application that sends, forwards, // or receives the request. Service peers should fill in the `service`, // `principal`, and `labels` as appropriate. // [#next-free-field: 6] message Peer { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.AttributeContext.Peer"; // The address of the peer, this is typically the IP address. // It can also be UDS path, or others. config.core.v4alpha.Address address = 1; // The canonical service name of the peer. // It should be set to :ref:`the HTTP x-envoy-downstream-service-cluster // ` // If a more trusted source of the service name is available through mTLS/secure naming, it // should be used. string service = 2; // The labels associated with the peer. // These could be pod labels for Kubernetes or tags for VMs. // The source of the labels could be an X.509 certificate or other configuration. map labels = 3; // The authenticated identity of this peer. // For example, the identity associated with the workload such as a service account. // If an X.509 certificate is used to assert the identity this field should be sourced from // `URI Subject Alternative Names`, `DNS Subject Alternate Names` or `Subject` in that order. // The primary identity should be the principal. 
The principal format is issuer specific. // // Example: // * SPIFFE format is `spiffe://trust-domain/path` // * Google account format is `https://accounts.google.com/{userid}` string principal = 4; // The X.509 certificate used to authenticate the identify of this peer. // When present, the certificate contents are encoded in URL and PEM format. string certificate = 5; } // Represents a network request, such as an HTTP request. message Request { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.AttributeContext.Request"; // The timestamp when the proxy receives the first byte of the request. google.protobuf.Timestamp time = 1; // Represents an HTTP request or an HTTP-like request. HttpRequest http = 2; } // This message defines attributes for an HTTP request. // HTTP/1.x, HTTP/2, gRPC are all considered as HTTP requests. // [#next-free-field: 13] message HttpRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.AttributeContext.HttpRequest"; // The unique ID for a request, which can be propagated to downstream // systems. The ID should have low probability of collision // within a single day for a specific service. // For HTTP requests, it should be X-Request-ID or equivalent. string id = 1; // The HTTP request method, such as `GET`, `POST`. string method = 2; // The HTTP request headers. If multiple headers share the same key, they // must be merged according to the HTTP spec. All header keys must be // lower-cased, because HTTP header keys are case-insensitive. map headers = 3; // The request target, as it appears in the first line of the HTTP request. This includes // the URL path and query-string. No decoding is performed. string path = 4; // The HTTP request `Host` or 'Authority` header value. string host = 5; // The HTTP URL scheme, such as `http` and `https`. string scheme = 6; // This field is always empty, and exists for compatibility reasons. 
The HTTP URL query is // included in `path` field. string query = 7; // This field is always empty, and exists for compatibility reasons. The URL fragment is // not submitted as part of HTTP requests; it is unknowable. string fragment = 8; // The HTTP request size in bytes. If unknown, it must be -1. int64 size = 9; // The network protocol used with the request, such as "HTTP/1.0", "HTTP/1.1", or "HTTP/2". // // See :repo:`headers.h:ProtocolStrings ` for a list of all // possible values. string protocol = 10; // The HTTP request body. string body = 11; // The HTTP request body in bytes. This is used instead of // :ref:`body ` when // :ref:`pack_as_bytes ` // is set to true. bytes raw_body = 12; } // The source of a network activity, such as starting a TCP connection. // In a multi hop network activity, the source represents the sender of the // last hop. Peer source = 1; // The destination of a network activity, such as accepting a TCP connection. // In a multi hop network activity, the destination represents the receiver of // the last hop. Peer destination = 2; // Represents a network request, such as an HTTP request. Request request = 4; // This is analogous to http_request.headers, however these contents will not be sent to the // upstream server. Context_extensions provide an extension mechanism for sending additional // information to the auth server without modifying the proto definition. It maps to the // internal opaque context in the filter chain. map context_extensions = 10; // Dynamic metadata associated with the request. 
config.core.v4alpha.Metadata metadata_context = 11; } ================================================ FILE: api/envoy/service/auth/v4alpha/external_auth.proto ================================================ syntax = "proto3"; package envoy.service.auth.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/service/auth/v4alpha/attribute_context.proto"; import "envoy/type/v3/http_status.proto"; import "google/protobuf/struct.proto"; import "google/rpc/status.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.auth.v4alpha"; option java_outer_classname = "ExternalAuthProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Authorization Service ] // The authorization service request messages used by external authorization :ref:`network filter // ` and :ref:`HTTP filter `. // A generic interface for performing authorization check on incoming // requests to a networked service. service Authorization { // Performs authorization check based on the attributes associated with the // incoming request, and returns status `OK` or not `OK`. rpc Check(CheckRequest) returns (CheckResponse) { } } message CheckRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.CheckRequest"; // The request attributes. AttributeContext attributes = 1; } // HTTP attributes for a denied response. message DeniedHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.DeniedHttpResponse"; // This field allows the authorization service to send a HTTP response status // code to the downstream client other than 403 (Forbidden). 
type.v3.HttpStatus status = 1 [(validate.rules).message = {required: true}]; // This field allows the authorization service to send HTTP response headers // to the downstream client. Note that the `append` field in `HeaderValueOption` defaults to // false when used in this message. repeated config.core.v4alpha.HeaderValueOption headers = 2; // This field allows the authorization service to send a response body data // to the downstream client. string body = 3; } // HTTP attributes for an OK response. // [#next-free-field: 6] message OkHttpResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.OkHttpResponse"; reserved 3; reserved "dynamic_metadata"; // HTTP entity headers in addition to the original request headers. This allows the authorization // service to append, to add or to override headers from the original request before // dispatching it to the upstream. Note that the `append` field in `HeaderValueOption` defaults to // false when used in this message. By setting the `append` field to `true`, // the filter will append the correspondent header value to the matched request header. // By leaving `append` as false, the filter will either add a new header, or override an existing // one if there is a match. repeated config.core.v4alpha.HeaderValueOption headers = 2; // HTTP entity headers to remove from the original request before dispatching // it to the upstream. This allows the authorization service to act on auth // related headers (like `Authorization`), process them, and consume them. // Under this model, the upstream will either receive the request (if it's // authorized) or not receive it (if it's not), but will not see headers // containing authorization credentials. // // Pseudo headers (such as `:authority`, `:method`, `:path` etc), as well as // the header `Host`, may not be removed as that would make the request // malformed. If mentioned in `headers_to_remove` these special headers will // be ignored. 
// // When using the HTTP service this must instead be set by the HTTP // authorization service as a comma separated list like so: // ``x-envoy-auth-headers-to-remove: one-auth-header, another-auth-header``. repeated string headers_to_remove = 5; } // Intended for gRPC and Network Authorization servers `only`. message CheckResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.auth.v3.CheckResponse"; // Status `OK` allows the request. Any other status indicates the request should be denied. google.rpc.Status status = 1; // An message that contains HTTP response attributes. This message is // used when the authorization service needs to send custom responses to the // downstream client or, to modify/add request headers being dispatched to the upstream. oneof http_response { // Supplies http attributes for a denied response. DeniedHttpResponse denied_response = 2; // Supplies http attributes for an ok response. OkHttpResponse ok_response = 3; } // Optional response metadata that will be emitted as dynamic metadata to be consumed by the next // filter. This metadata lives in a namespace specified by the canonical name of extension filter // that requires it: // // - :ref:`envoy.filters.http.ext_authz ` for HTTP filter. // - :ref:`envoy.filters.network.ext_authz ` for network filter. google.protobuf.Struct dynamic_metadata = 4; } ================================================ FILE: api/envoy/service/cluster/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/annotations:pkg", "//envoy/api/v2:pkg", "//envoy/service/discovery/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/cluster/v3/cds.proto ================================================ syntax = "proto3"; package envoy.service.cluster.v3; import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.cluster.v3"; option java_outer_classname = "CdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: CDS] // Return list of all clusters this proxy will load balance to. service ClusterDiscoveryService { option (envoy.annotations.resource).type = "envoy.config.cluster.v3.Cluster"; rpc StreamClusters(stream discovery.v3.DiscoveryRequest) returns (stream discovery.v3.DiscoveryResponse) { } rpc DeltaClusters(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } rpc FetchClusters(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { option (google.api.http).post = "/v3/discovery:clusters"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
message CdsDummy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.CdsDummy"; } ================================================ FILE: api/envoy/service/discovery/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/annotations:pkg", "//envoy/api/v2:pkg", "//envoy/api/v2/core:pkg", "//envoy/api/v2/endpoint:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/discovery/v2/ads.proto ================================================ syntax = "proto3"; package envoy.service.discovery.v2; import "envoy/api/v2/discovery.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Aggregated Discovery Service (ADS)] // [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, // and listeners are retained in the package `envoy.api.v2` for backwards // compatibility with existing management servers. New development in discovery // services should proceed in the package `envoy.service.discovery.v2`. // See https://github.com/lyft/envoy-api#apis for a description of the role of // ADS and how it is intended to be used by a management server. ADS requests // have the same structure as their singleton xDS counterparts, but can // multiplex many resource types on a single stream. The type_url in the // DiscoveryRequest/DiscoveryResponse provides sufficient information to recover // the multiplexed singleton APIs at the Envoy instance and management server. 
service AggregatedDiscoveryService { // This is a gRPC-only API. rpc StreamAggregatedResources(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { } rpc DeltaAggregatedResources(stream api.v2.DeltaDiscoveryRequest) returns (stream api.v2.DeltaDiscoveryResponse) { } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 message AdsDummy { } ================================================ FILE: api/envoy/service/discovery/v2/hds.proto ================================================ syntax = "proto3"; package envoy.service.discovery.v2; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/health_check.proto"; import "envoy/api/v2/endpoint/endpoint_components.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "HdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.health.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Health Discovery Service (HDS)] // HDS is Health Discovery Service. It compliments Envoy’s health checking // service by designating this Envoy to be a healthchecker for a subset of hosts // in the cluster. The status of these health checks will be reported to the // management server, where it can be aggregated etc and redistributed back to // Envoy through EDS. service HealthDiscoveryService { // 1. Envoy starts up and if its can_healthcheck option in the static // bootstrap config is enabled, sends HealthCheckRequest to the management // server. It supplies its capabilities (which protocol it can health check // with, what zone it resides in, etc.). // 2. 
In response to (1), the management server designates this Envoy as a // healthchecker to health check a subset of all upstream hosts for a given // cluster (for example upstream Host 1 and Host 2). It streams // HealthCheckSpecifier messages with cluster related configuration for all // clusters this Envoy is designated to health check. Subsequent // HealthCheckSpecifier message will be sent on changes to: // a. Endpoints to health checks // b. Per cluster configuration change // 3. Envoy creates a health probe based on the HealthCheck config and sends // it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck // configuration Envoy waits upon the arrival of the probe response and // looks at the content of the response to decide whether the endpoint is // healthy or not. If a response hasn't been received within the timeout // interval, the endpoint health status is considered TIMEOUT. // 4. Envoy reports results back in an EndpointHealthResponse message. // Envoy streams responses as often as the interval configured by the // management server in HealthCheckSpecifier. // 5. The management Server collects health statuses for all endpoints in the // cluster (for all clusters) and uses this information to construct // EndpointDiscoveryResponse messages. // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load // balances traffic to them without additional health checking. It may // use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection // failed to a particular endpoint to account for health status propagation // delay between HDS and EDS). // By default, can_healthcheck is true. If can_healthcheck is false, Cluster // configuration may not contain HealthCheck message. // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above // invariant? // TODO(htuch): Add @amb67's diagram. 
rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse) returns (stream HealthCheckSpecifier) { } // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of // request/response. Should we add an identifier to the HealthCheckSpecifier // to bind with the response? rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse) returns (HealthCheckSpecifier) { option (google.api.http).post = "/v2/discovery:health_check"; option (google.api.http).body = "*"; } } // Defines supported protocols etc, so the management server can assign proper // endpoints to healthcheck. message Capability { // Different Envoy instances may have different capabilities (e.g. Redis) // and/or have ports enabled for different protocols. enum Protocol { HTTP = 0; TCP = 1; REDIS = 2; } repeated Protocol health_check_protocols = 1; } message HealthCheckRequest { api.v2.core.Node node = 1; Capability capability = 2; } message EndpointHealth { api.v2.endpoint.Endpoint endpoint = 1; api.v2.core.HealthStatus health_status = 2; } message EndpointHealthResponse { repeated EndpointHealth endpoints_health = 1; } message HealthCheckRequestOrEndpointHealthResponse { oneof request_type { HealthCheckRequest health_check_request = 1; EndpointHealthResponse endpoint_health_response = 2; } } message LocalityEndpoints { api.v2.core.Locality locality = 1; repeated api.v2.endpoint.Endpoint endpoints = 2; } // The cluster name and locality is provided to Envoy for the endpoints that it // health checks to support statistics reporting, logging and debugging by the // Envoy instance (outside of HDS). For maximum usefulness, it should match the // same cluster structure as that provided by EDS. message ClusterHealthCheck { string cluster_name = 1; repeated api.v2.core.HealthCheck health_checks = 2; repeated LocalityEndpoints locality_endpoints = 3; } message HealthCheckSpecifier { repeated ClusterHealthCheck cluster_health_checks = 1; // The default is 1 second. 
google.protobuf.Duration interval = 2; } ================================================ FILE: api/envoy/service/discovery/v2/rtds.proto ================================================ syntax = "proto3"; package envoy.service.discovery.v2; import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "RtdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.runtime.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` // Discovery service for Runtime resources. service RuntimeDiscoveryService { option (envoy.annotations.resource).type = "envoy.service.discovery.v2.Runtime"; rpc StreamRuntime(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { } rpc DeltaRuntime(stream api.v2.DeltaDiscoveryRequest) returns (stream api.v2.DeltaDiscoveryResponse) { } rpc FetchRuntime(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { option (google.api.http).post = "/v2/discovery:runtime"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 message RtdsDummy { } // RTDS resource type. This describes a layer in the runtime virtual filesystem. message Runtime { // Runtime resource name. This makes the Runtime a self-describing xDS // resource. 
string name = 1 [(validate.rules).string = {min_bytes: 1}]; google.protobuf.Struct layer = 2; } ================================================ FILE: api/envoy/service/discovery/v2/sds.proto ================================================ syntax = "proto3"; package envoy.service.discovery.v2; import "envoy/api/v2/discovery.proto"; import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v2"; option java_outer_classname = "SdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.secret.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Secret Discovery Service (SDS)] service SecretDiscoveryService { option (envoy.annotations.resource).type = "envoy.api.v2.auth.Secret"; rpc DeltaSecrets(stream api.v2.DeltaDiscoveryRequest) returns (stream api.v2.DeltaDiscoveryResponse) { } rpc StreamSecrets(stream api.v2.DiscoveryRequest) returns (stream api.v2.DiscoveryResponse) { } rpc FetchSecrets(api.v2.DiscoveryRequest) returns (api.v2.DiscoveryResponse) { option (google.api.http).post = "/v2/discovery:secrets"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 message SdsDummy { } ================================================ FILE: api/envoy/service/discovery/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/api/v2:pkg", "//envoy/config/core/v3:pkg", "//envoy/service/discovery/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/service/discovery/v3/ads.proto ================================================ syntax = "proto3"; package envoy.service.discovery.v3; import "envoy/service/discovery/v3/discovery.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v3"; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Aggregated Discovery Service (ADS)] // [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, // and listeners are retained in the package `envoy.api.v2` for backwards // compatibility with existing management servers. New development in discovery // services should proceed in the package `envoy.service.discovery.v2`. // See https://github.com/lyft/envoy-api#apis for a description of the role of // ADS and how it is intended to be used by a management server. ADS requests // have the same structure as their singleton xDS counterparts, but can // multiplex many resource types on a single stream. The type_url in the // DiscoveryRequest/DiscoveryResponse provides sufficient information to recover // the multiplexed singleton APIs at the Envoy instance and management server. service AggregatedDiscoveryService { // This is a gRPC-only API. 
rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 message AdsDummy { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.AdsDummy"; } ================================================ FILE: api/envoy/service/discovery/v3/discovery.proto ================================================ syntax = "proto3"; package envoy.service.discovery.v3; import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; import "udpa/core/v1/resource_locator.proto"; import "udpa/core/v1/resource_name.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v3"; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Common discovery API components] // A DiscoveryRequest requests a set of versioned resources of the same type for // a given Envoy node on some API. // [#next-free-field: 7] message DiscoveryRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryRequest"; // The version_info provided in the request messages will be the version_info // received with the most recent successfully processed response or empty on // the first request. It is expected that no new request is sent after a // response is received until the Envoy instance is ready to ACK/NACK the new // configuration. ACK/NACK takes place by returning the new API config version // as applied or the previous API config version respectively. 
Each type_url // (see below) has an independent version associated with it. string version_info = 1; // The node making the request. config.core.v3.Node node = 2; // List of resources to subscribe to, e.g. list of cluster names or a route // configuration name. If this is empty, all resources for the API are // returned. LDS/CDS may have empty resource_names, which will cause all // resources for the Envoy instance to be returned. The LDS and CDS responses // will then imply a number of resources that need to be fetched via EDS/RDS, // which will be explicitly enumerated in resource_names. repeated string resource_names = 3; // Type of the resource that is being requested, e.g. // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is // required for ADS. string type_url = 4; // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above // discussion on version_info and the DiscoveryResponse nonce comment. This // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, // or 2) the client has not yet accepted an update in this xDS stream (unlike // delta, where it is populated only for new explicit ACKs). string response_nonce = 5; // This is populated when the previous :ref:`DiscoveryResponse ` // failed to update configuration. The *message* field in *error_details* provides the Envoy // internal exception related to the failure. It is only intended for consumption during manual // debugging, the string provided is not guaranteed to be stable across Envoy versions. google.rpc.Status error_detail = 6; } // [#next-free-field: 7] message DiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DiscoveryResponse"; // The version of the response data. string version_info = 1; // The response resources. These resources are typed and depend on the API being called. 
repeated google.protobuf.Any resources = 2; // [#not-implemented-hide:] // Canary is used to support two Envoy command line flags: // // * --terminate-on-canary-transition-failure. When set, Envoy is able to // terminate if it detects that configuration is stuck at canary. Consider // this example sequence of updates: // - Management server applies a canary config successfully. // - Management server rolls back to a production config. // - Envoy rejects the new production config. // Since there is no sensible way to continue receiving configuration // updates, Envoy will then terminate and apply production config from a // clean slate. // * --dry-run-canary. When set, a canary response will never be applied, only // validated via a dry run. bool canary = 3; // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). string type_url = 4; // For gRPC based subscriptions, the nonce provides a way to explicitly ack a // specific DiscoveryResponse in a following DiscoveryRequest. Additional // messages may have been sent by Envoy to the management server for the // previous version on the stream prior to this DiscoveryResponse, that were // unprocessed at response send time. The nonce allows the management server // to ignore any further DiscoveryRequests for the previous version until a // DiscoveryRequest bearing the nonce. The nonce is optional and is not // required for non-stream based xDS implementations. string nonce = 5; // [#not-implemented-hide:] // The control plane instance that sent the response. config.core.v3.ControlPlane control_plane = 6; } // DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC // endpoint for Delta xDS. // // With Delta xDS, the DeltaDiscoveryResponses do not need to include a full // snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a // diff to the state of a xDS client. 
// In Delta XDS there are per-resource versions, which allow tracking state at // the resource granularity. // An xDS Delta session is always in the context of a gRPC bidirectional // stream. This allows the xDS server to keep track of the state of xDS clients // connected to it. // // In Delta xDS the nonce field is required and used to pair // DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. // Optionally, a response message level system_version_info is present for // debugging purposes only. // // DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest // can be either or both of: [1] informing the server of what resources the // client has gained/lost interest in (using resource_names_subscribe and // resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from // the server (using response_nonce, with presence of error_detail making it a NACK). // Additionally, the first message (for a given type_url) of a reconnected gRPC stream // has a third role: informing the server of the resources (and their versions) // that the client already possesses, using the initial_resource_versions field. // // As with state-of-the-world, when multiple resource types are multiplexed (ADS), // all requests/acknowledgments/updates are logically walled off by type_url: // a Cluster ACK exists in a completely separate world from a prior Route NACK. // In particular, initial_resource_versions being sent at the "start" of every // gRPC stream actually entails a message for each type_url, each with its own // initial_resource_versions. // [#next-free-field: 10] message DeltaDiscoveryRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryRequest"; // The node making the request. config.core.v3.Node node = 1; // Type of the resource that is being requested, e.g. // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". 
This does not need to be set if // resources are only referenced via *udpa_resources_subscribe* and // *udpa_resources_unsubscribe*. string type_url = 2; // DeltaDiscoveryRequests allow the client to add or remove individual // resources to the set of tracked resources in the context of a stream. // All resource names in the resource_names_subscribe list are added to the // set of tracked resources and all resource names in the resource_names_unsubscribe // list are removed from the set of tracked resources. // // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or // resource_names_unsubscribe list simply means that no resources are to be // added or removed to the resource list. // *Like* state-of-the-world xDS, the server must send updates for all tracked // resources, but can also send updates for resources the client has not subscribed to. // // NOTE: the server must respond with all resources listed in resource_names_subscribe, // even if it believes the client has the most recent version of them. The reason: // the client may have dropped them, but then regained interest before it had a chance // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. // // These two fields can be set in any DeltaDiscoveryRequest, including ACKs // and initial_resource_versions. // // A list of Resource names to add to the list of tracked resources. repeated string resource_names_subscribe = 3; // As with *resource_names_subscribe* but used when subscribing to resources indicated // by a *udpa.core.v1.ResourceLocator*. The directives in the resource locator // are ignored and the context parameters are matched with // *context_param_specifier* specific semantics. // [#not-implemented-hide:] repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8; // A list of Resource names to remove from the list of tracked resources. 
repeated string resource_names_unsubscribe = 4; // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed // resource locator provided in *udpa_resources_subscribe*. // [#not-implemented-hide:] repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9; // Informs the server of the versions of the resources the xDS client knows of, to enable the // client to continue the same logical xDS session even in the face of gRPC stream reconnection. // It will not be populated: [1] in the very first stream of a session, since the client will // not yet have any resources, [2] in any message after the first in a stream (for a given // type_url), since the server will already be correctly tracking the client's state. // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) // The map's keys are names of xDS resources known to the xDS client. // The map's values are opaque resource versions. map<string, string> initial_resource_versions = 5; // When the DeltaDiscoveryRequest is an ACK or NACK message in response // to a previous DeltaDiscoveryResponse, the response_nonce must be the // nonce in the DeltaDiscoveryResponse. // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. string response_nonce = 6; // This is populated when the previous :ref:`DiscoveryResponse ` // failed to update configuration. The *message* field in *error_details* // provides the Envoy internal exception related to the failure. google.rpc.Status error_detail = 7; } // [#next-free-field: 8] message DeltaDiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.DeltaDiscoveryResponse"; // The version of the response data (used for debugging). string system_version_info = 1; // The response resources. These are typed resources, whose types must match // the type_url field. 
repeated Resource resources = 2; // field id 3 IS available! // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. // This does not need to be set if *udpa_removed_resources* is used instead of // *removed_resources*. string type_url = 4; // Resource names of resources that have been deleted and are to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; // As with *removed_resources* but used when a removed resource was named in // its *Resource*s with a *udpa.core.v1.ResourceName*. // [#not-implemented-hide:] repeated udpa.core.v1.ResourceName udpa_removed_resources = 7; // The nonce provides a way for DeltaDiscoveryRequests to uniquely // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. string nonce = 5; } // [#next-free-field: 6] message Resource { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.Resource"; // The resource's name, to distinguish it from others of the same type of resource. string name = 3 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered. udpa.core.v1.ResourceName udpa_resource_name = 5 [(udpa.annotations.field_migrate).oneof_promotion = "name_specifier"]; // The aliases are a list of other names that this resource can go by. repeated string aliases = 4; // The resource level version. It allows xDS to track the state of individual // resources. string version = 1; // The resource being tracked. google.protobuf.Any resource = 2; } ================================================ FILE: api/envoy/service/discovery/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/service/discovery/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@com_github_cncf_udpa//udpa/core/v1:pkg", ], ) ================================================ FILE: api/envoy/service/discovery/v4alpha/ads.proto ================================================ syntax = "proto3"; package envoy.service.discovery.v4alpha; import "envoy/service/discovery/v4alpha/discovery.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v4alpha"; option java_outer_classname = "AdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Aggregated Discovery Service (ADS)] // [#not-implemented-hide:] Discovery services for endpoints, clusters, routes, // and listeners are retained in the package `envoy.api.v2` for backwards // compatibility with existing management servers. New development in discovery // services should proceed in the package `envoy.service.discovery.v2`. // See https://github.com/lyft/envoy-api#apis for a description of the role of // ADS and how it is intended to be used by a management server. ADS requests // have the same structure as their singleton xDS counterparts, but can // multiplex many resource types on a single stream. The type_url in the // DiscoveryRequest/DiscoveryResponse provides sufficient information to recover // the multiplexed singleton APIs at the Envoy instance and management server. service AggregatedDiscoveryService { // This is a gRPC-only API. 
rpc StreamAggregatedResources(stream DiscoveryRequest) returns (stream DiscoveryResponse) { } rpc DeltaAggregatedResources(stream DeltaDiscoveryRequest) returns (stream DeltaDiscoveryResponse) { } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 message AdsDummy { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v3.AdsDummy"; } ================================================ FILE: api/envoy/service/discovery/v4alpha/discovery.proto ================================================ syntax = "proto3"; package envoy.service.discovery.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "google/protobuf/any.proto"; import "google/rpc/status.proto"; import "udpa/core/v1/resource_locator.proto"; import "udpa/core/v1/resource_name.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.discovery.v4alpha"; option java_outer_classname = "DiscoveryProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Common discovery API components] // A DiscoveryRequest requests a set of versioned resources of the same type for // a given Envoy node on some API. // [#next-free-field: 7] message DiscoveryRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v3.DiscoveryRequest"; // The version_info provided in the request messages will be the version_info // received with the most recent successfully processed response or empty on // the first request. It is expected that no new request is sent after a // response is received until the Envoy instance is ready to ACK/NACK the new // configuration. 
ACK/NACK takes place by returning the new API config version // as applied or the previous API config version respectively. Each type_url // (see below) has an independent version associated with it. string version_info = 1; // The node making the request. config.core.v4alpha.Node node = 2; // List of resources to subscribe to, e.g. list of cluster names or a route // configuration name. If this is empty, all resources for the API are // returned. LDS/CDS may have empty resource_names, which will cause all // resources for the Envoy instance to be returned. The LDS and CDS responses // will then imply a number of resources that need to be fetched via EDS/RDS, // which will be explicitly enumerated in resource_names. repeated string resource_names = 3; // Type of the resource that is being requested, e.g. // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This is implicit // in requests made via singleton xDS APIs such as CDS, LDS, etc. but is // required for ADS. string type_url = 4; // nonce corresponding to DiscoveryResponse being ACK/NACKed. See above // discussion on version_info and the DiscoveryResponse nonce comment. This // may be empty only if 1) this is a non-persistent-stream xDS such as HTTP, // or 2) the client has not yet accepted an update in this xDS stream (unlike // delta, where it is populated only for new explicit ACKs). string response_nonce = 5; // This is populated when the previous :ref:`DiscoveryResponse ` // failed to update configuration. The *message* field in *error_details* provides the Envoy // internal exception related to the failure. It is only intended for consumption during manual // debugging, the string provided is not guaranteed to be stable across Envoy versions. google.rpc.Status error_detail = 6; } // [#next-free-field: 7] message DiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v3.DiscoveryResponse"; // The version of the response data. 
string version_info = 1; // The response resources. These resources are typed and depend on the API being called. repeated google.protobuf.Any resources = 2; // [#not-implemented-hide:] // Canary is used to support two Envoy command line flags: // // * --terminate-on-canary-transition-failure. When set, Envoy is able to // terminate if it detects that configuration is stuck at canary. Consider // this example sequence of updates: // - Management server applies a canary config successfully. // - Management server rolls back to a production config. // - Envoy rejects the new production config. // Since there is no sensible way to continue receiving configuration // updates, Envoy will then terminate and apply production config from a // clean slate. // * --dry-run-canary. When set, a canary response will never be applied, only // validated via a dry run. bool canary = 3; // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the 'resources' repeated Any (if non-empty). string type_url = 4; // For gRPC based subscriptions, the nonce provides a way to explicitly ack a // specific DiscoveryResponse in a following DiscoveryRequest. Additional // messages may have been sent by Envoy to the management server for the // previous version on the stream prior to this DiscoveryResponse, that were // unprocessed at response send time. The nonce allows the management server // to ignore any further DiscoveryRequests for the previous version until a // DiscoveryRequest bearing the nonce. The nonce is optional and is not // required for non-stream based xDS implementations. string nonce = 5; // [#not-implemented-hide:] // The control plane instance that sent the response. config.core.v4alpha.ControlPlane control_plane = 6; } // DeltaDiscoveryRequest and DeltaDiscoveryResponse are used in a new gRPC // endpoint for Delta xDS. 
// // With Delta xDS, the DeltaDiscoveryResponses do not need to include a full // snapshot of the tracked resources. Instead, DeltaDiscoveryResponses are a // diff to the state of a xDS client. // In Delta XDS there are per-resource versions, which allow tracking state at // the resource granularity. // An xDS Delta session is always in the context of a gRPC bidirectional // stream. This allows the xDS server to keep track of the state of xDS clients // connected to it. // // In Delta xDS the nonce field is required and used to pair // DeltaDiscoveryResponse to a DeltaDiscoveryRequest ACK or NACK. // Optionally, a response message level system_version_info is present for // debugging purposes only. // // DeltaDiscoveryRequest plays two independent roles. Any DeltaDiscoveryRequest // can be either or both of: [1] informing the server of what resources the // client has gained/lost interest in (using resource_names_subscribe and // resource_names_unsubscribe), or [2] (N)ACKing an earlier resource update from // the server (using response_nonce, with presence of error_detail making it a NACK). // Additionally, the first message (for a given type_url) of a reconnected gRPC stream // has a third role: informing the server of the resources (and their versions) // that the client already possesses, using the initial_resource_versions field. // // As with state-of-the-world, when multiple resource types are multiplexed (ADS), // all requests/acknowledgments/updates are logically walled off by type_url: // a Cluster ACK exists in a completely separate world from a prior Route NACK. // In particular, initial_resource_versions being sent at the "start" of every // gRPC stream actually entails a message for each type_url, each with its own // initial_resource_versions. // [#next-free-field: 10] message DeltaDiscoveryRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v3.DeltaDiscoveryRequest"; // The node making the request. 
config.core.v4alpha.Node node = 1; // Type of the resource that is being requested, e.g. // "type.googleapis.com/envoy.api.v2.ClusterLoadAssignment". This does not need to be set if // resources are only referenced via *udpa_resources_subscribe* and // *udpa_resources_unsubscribe*. string type_url = 2; // DeltaDiscoveryRequests allow the client to add or remove individual // resources to the set of tracked resources in the context of a stream. // All resource names in the resource_names_subscribe list are added to the // set of tracked resources and all resource names in the resource_names_unsubscribe // list are removed from the set of tracked resources. // // *Unlike* state-of-the-world xDS, an empty resource_names_subscribe or // resource_names_unsubscribe list simply means that no resources are to be // added or removed to the resource list. // *Like* state-of-the-world xDS, the server must send updates for all tracked // resources, but can also send updates for resources the client has not subscribed to. // // NOTE: the server must respond with all resources listed in resource_names_subscribe, // even if it believes the client has the most recent version of them. The reason: // the client may have dropped them, but then regained interest before it had a chance // to send the unsubscribe message. See DeltaSubscriptionStateTest.RemoveThenAdd. // // These two fields can be set in any DeltaDiscoveryRequest, including ACKs // and initial_resource_versions. // // A list of Resource names to add to the list of tracked resources. repeated string resource_names_subscribe = 3; // As with *resource_names_subscribe* but used when subscribing to resources indicated // by a *udpa.core.v1.ResourceLocator*. The directives in the resource locator // are ignored and the context parameters are matched with // *context_param_specifier* specific semantics. 
// [#not-implemented-hide:] repeated udpa.core.v1.ResourceLocator udpa_resources_subscribe = 8; // A list of Resource names to remove from the list of tracked resources. repeated string resource_names_unsubscribe = 4; // As with *resource_names_unsubscribe* but used when unsubscribing to resources indicated by a // *udpa.core.v1.ResourceLocator*. This must match a previously subscribed // resource locator provided in *udpa_resources_subscribe*. // [#not-implemented-hide:] repeated udpa.core.v1.ResourceLocator udpa_resources_unsubscribe = 9; // Informs the server of the versions of the resources the xDS client knows of, to enable the // client to continue the same logical xDS session even in the face of gRPC stream reconnection. // It will not be populated: [1] in the very first stream of a session, since the client will // not yet have any resources, [2] in any message after the first in a stream (for a given // type_url), since the server will already be correctly tracking the client's state. // (In ADS, the first message *of each type_url* of a reconnected stream populates this map.) // The map's keys are names of xDS resources known to the xDS client. // The map's values are opaque resource versions. map<string, string> initial_resource_versions = 5; // When the DeltaDiscoveryRequest is an ACK or NACK message in response // to a previous DeltaDiscoveryResponse, the response_nonce must be the // nonce in the DeltaDiscoveryResponse. // Otherwise (unlike in DiscoveryRequest) response_nonce must be omitted. string response_nonce = 6; // This is populated when the previous :ref:`DiscoveryResponse ` // failed to update configuration. The *message* field in *error_details* // provides the Envoy internal exception related to the failure. 
google.rpc.Status error_detail = 7; } // [#next-free-field: 8] message DeltaDiscoveryResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v3.DeltaDiscoveryResponse"; // The version of the response data (used for debugging). string system_version_info = 1; // The response resources. These are typed resources, whose types must match // the type_url field. repeated Resource resources = 2; // field id 3 IS available! // Type URL for resources. Identifies the xDS API when muxing over ADS. // Must be consistent with the type_url in the Any within 'resources' if 'resources' is non-empty. // This does not need to be set if *udpa_removed_resources* is used instead of // *removed_resources*. string type_url = 4; // Resource names of resources that have been deleted and are to be removed from the xDS Client. // Removed resources for missing resources can be ignored. repeated string removed_resources = 6; // As with *removed_resources* but used when a removed resource was named in // its *Resource*s with a *udpa.core.v1.ResourceName*. // [#not-implemented-hide:] repeated udpa.core.v1.ResourceName udpa_removed_resources = 7; // The nonce provides a way for DeltaDiscoveryRequests to uniquely // reference a DeltaDiscoveryResponse when (N)ACKing. The nonce is required. string nonce = 5; } // [#next-free-field: 6] message Resource { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v3.Resource"; oneof name_specifier { // The resource's name, to distinguish it from others of the same type of resource. string name = 3; // Used instead of *name* when a resource with a *udpa.core.v1.ResourceName* is delivered. udpa.core.v1.ResourceName udpa_resource_name = 5; } // The aliases are a list of other names that this resource can go by. repeated string aliases = 4; // The resource level version. It allows xDS to track the state of individual // resources. string version = 1; // The resource being tracked. 
google.protobuf.Any resource = 2; } ================================================ FILE: api/envoy/service/endpoint/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/annotations:pkg", "//envoy/api/v2:pkg", "//envoy/service/discovery/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/endpoint/v3/eds.proto ================================================ syntax = "proto3"; package envoy.service.endpoint.v3; import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.endpoint.v3"; option java_outer_classname = "EdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: EDS] // Endpoint discovery :ref:`architecture overview ` service EndpointDiscoveryService { option (envoy.annotations.resource).type = "envoy.config.endpoint.v3.ClusterLoadAssignment"; // The resource_names field in DiscoveryRequest specifies a list of clusters // to subscribe to updates for. 
rpc StreamEndpoints(stream discovery.v3.DiscoveryRequest) returns (stream discovery.v3.DiscoveryResponse) { } rpc DeltaEndpoints(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } rpc FetchEndpoints(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { option (google.api.http).post = "/v3/discovery:endpoints"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. message EdsDummy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.EdsDummy"; } ================================================ FILE: api/envoy/service/event_reporting/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/event_reporting/v2alpha/event_reporting_service.proto ================================================ syntax = "proto3"; package envoy.service.event_reporting.v2alpha; import "envoy/api/v2/core/base.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.event_reporting.v2alpha"; option java_outer_classname = "EventReportingServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.service.event_reporting.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: 
gRPC Event Reporting Service] // [#not-implemented-hide:] // Service for streaming different types of events from Envoy to a server. The examples of // such events may be health check or outlier detection events. service EventReportingService { // Envoy will connect and send StreamEventsRequest messages forever. // The management server may send StreamEventsResponse to configure event stream. See below. // This API is designed for high throughput with the expectation that it might be lossy. rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { } } // [#not-implemented-hide:] // An events envoy sends to the management server. message StreamEventsRequest { message Identifier { // The node sending the event messages over the stream. api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; } // Identifier data that will only be sent in the first message on the stream. This is effectively // structured metadata and is a performance optimization. Identifier identifier = 1; // Batch of events. When the stream is already active, it will be the events occurred // since the last message had been sent. If the server receives unknown event type, it should // silently ignore it. // // The following events are supported: // // * :ref:`HealthCheckEvent ` // * :ref:`OutlierDetectionEvent ` repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; } // [#not-implemented-hide:] // The management server may send envoy a StreamEventsResponse to tell which events the server // is interested in. In future, with aggregated event reporting service, this message will // contain, for example, clusters the envoy should send events for, or event types the server // wants to process. message StreamEventsResponse { } ================================================ FILE: api/envoy/service/event_reporting/v3/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", "//envoy/service/event_reporting/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/event_reporting/v3/event_reporting_service.proto ================================================ syntax = "proto3"; package envoy.service.event_reporting.v3; import "envoy/config/core/v3/base.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.event_reporting.v3"; option java_outer_classname = "EventReportingServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: gRPC Event Reporting Service] // [#not-implemented-hide:] // Service for streaming different types of events from Envoy to a server. The examples of // such events may be health check or outlier detection events. service EventReportingService { // Envoy will connect and send StreamEventsRequest messages forever. // The management server may send StreamEventsResponse to configure event stream. See below. // This API is designed for high throughput with the expectation that it might be lossy. rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { } } // [#not-implemented-hide:] // An events envoy sends to the management server. 
message StreamEventsRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.event_reporting.v2alpha.StreamEventsRequest"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.event_reporting.v2alpha.StreamEventsRequest.Identifier"; // The node sending the event messages over the stream. config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; } // Identifier data that will only be sent in the first message on the stream. This is effectively // structured metadata and is a performance optimization. Identifier identifier = 1; // Batch of events. When the stream is already active, it will be the events occurred // since the last message had been sent. If the server receives unknown event type, it should // silently ignore it. // // The following events are supported: // // * :ref:`HealthCheckEvent ` // * :ref:`OutlierDetectionEvent ` repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}]; } // [#not-implemented-hide:] // The management server may send envoy a StreamEventsResponse to tell which events the server // is interested in. In future, with aggregated event reporting service, this message will // contain, for example, clusters the envoy should send events for, or event types the server // wants to process. message StreamEventsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.event_reporting.v2alpha.StreamEventsResponse"; } ================================================ FILE: api/envoy/service/event_reporting/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/service/event_reporting/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/event_reporting/v4alpha/event_reporting_service.proto ================================================ syntax = "proto3"; package envoy.service.event_reporting.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "google/protobuf/any.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.event_reporting.v4alpha"; option java_outer_classname = "EventReportingServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: gRPC Event Reporting Service] // [#not-implemented-hide:] // Service for streaming different types of events from Envoy to a server. The examples of // such events may be health check or outlier detection events. service EventReportingService { // Envoy will connect and send StreamEventsRequest messages forever. // The management server may send StreamEventsResponse to configure event stream. See below. // This API is designed for high throughput with the expectation that it might be lossy. rpc StreamEvents(stream StreamEventsRequest) returns (stream StreamEventsResponse) { } } // [#not-implemented-hide:] // An events envoy sends to the management server. 
message StreamEventsRequest {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.event_reporting.v3.StreamEventsRequest";

  message Identifier {
    option (udpa.annotations.versioning).previous_message_type =
        "envoy.service.event_reporting.v3.StreamEventsRequest.Identifier";

    // The node sending the event messages over the stream.
    config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}];
  }

  // Identifier data that will only be sent in the first message on the stream. This is effectively
  // structured metadata and is a performance optimization.
  Identifier identifier = 1;

  // Batch of events. When the stream is already active, it will be the events that occurred
  // since the last message had been sent. If the server receives an unknown event type, it should
  // silently ignore it.
  //
  // The following events are supported:
  //
  // * :ref:`HealthCheckEvent `
  // * :ref:`OutlierDetectionEvent `
  repeated google.protobuf.Any events = 2 [(validate.rules).repeated = {min_items: 1}];
}

// [#not-implemented-hide:]
// The management server may send envoy a StreamEventsResponse to tell which events the server
// is interested in. In future, with aggregated event reporting service, this message will
// contain, for example, clusters the envoy should send events for, or event types the server
// wants to process.
message StreamEventsResponse {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.event_reporting.v3.StreamEventsResponse";
}

================================================
FILE: api/envoy/service/extension/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    has_services = True,
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/service/discovery/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/service/extension/v3/config_discovery.proto
================================================
syntax = "proto3";

package envoy.service.extension.v3;

import "envoy/service/discovery/v3/discovery.proto";

import "google/api/annotations.proto";

import "envoy/annotations/resource.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.service.extension.v3";
option java_outer_classname = "ConfigDiscoveryProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Extension Config Discovery Service (ECDS)]

// Return extension configurations.
service ExtensionConfigDiscoveryService {
  option (envoy.annotations.resource).type = "envoy.config.core.v3.TypedExtensionConfig";

  rpc StreamExtensionConfigs(stream discovery.v3.DiscoveryRequest)
      returns (stream discovery.v3.DiscoveryResponse) {
  }

  rpc DeltaExtensionConfigs(stream discovery.v3.DeltaDiscoveryRequest)
      returns (stream discovery.v3.DeltaDiscoveryResponse) {
  }

  rpc FetchExtensionConfigs(discovery.v3.DiscoveryRequest)
      returns (discovery.v3.DiscoveryResponse) {
    option (google.api.http).post = "/v3/discovery:extension_configs";
    option (google.api.http).body = "*";
  }
}

// [#not-implemented-hide:] Not configuration. Workaround C++ protobuf issue
// with importing services: https://github.com/google/protobuf/issues/4221 and
// protoxform to upgrade the file.
message EcdsDummy {
}

================================================
FILE: api/envoy/service/health/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    has_services = True,
    deps = [
        "//envoy/config/cluster/v3:pkg",
        "//envoy/config/core/v3:pkg",
        "//envoy/config/endpoint/v3:pkg",
        "//envoy/service/discovery/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/service/health/v3/hds.proto
================================================
syntax = "proto3";

package envoy.service.health.v3;

import "envoy/config/cluster/v3/cluster.proto";
import "envoy/config/core/v3/base.proto";
import "envoy/config/core/v3/health_check.proto";
import "envoy/config/endpoint/v3/endpoint_components.proto";

import "google/api/annotations.proto";
import "google/protobuf/duration.proto";

import "udpa/annotations/migrate.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.service.health.v3";
option java_outer_classname = "HdsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Health Discovery Service (HDS)]

// HDS is Health Discovery Service. It complements Envoy’s health checking
// service by designating this Envoy to be a healthchecker for a subset of hosts
// in the cluster. The status of these health checks will be reported to the
// management server, where it can be aggregated etc and redistributed back to
// Envoy through EDS.
service HealthDiscoveryService {
  // 1. Envoy starts up and if its can_healthcheck option in the static
  //    bootstrap config is enabled, sends HealthCheckRequest to the management
  //    server. It supplies its capabilities (which protocol it can health check
  //    with, what zone it resides in, etc.).
  // 2. In response to (1), the management server designates this Envoy as a
  //    healthchecker to health check a subset of all upstream hosts for a given
  //    cluster (for example upstream Host 1 and Host 2). It streams
  //    HealthCheckSpecifier messages with cluster related configuration for all
  //    clusters this Envoy is designated to health check. Subsequent
  //    HealthCheckSpecifier message will be sent on changes to:
  //    a. Endpoints to health checks
  //    b. Per cluster configuration change
  // 3. Envoy creates a health probe based on the HealthCheck config and sends
  //    it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck
  //    configuration Envoy waits upon the arrival of the probe response and
  //    looks at the content of the response to decide whether the endpoint is
  //    healthy or not. If a response hasn't been received within the timeout
  //    interval, the endpoint health status is considered TIMEOUT.
  // 4. Envoy reports results back in an EndpointHealthResponse message.
  //    Envoy streams responses as often as the interval configured by the
  //    management server in HealthCheckSpecifier.
  // 5. The management Server collects health statuses for all endpoints in the
  //    cluster (for all clusters) and uses this information to construct
  //    EndpointDiscoveryResponse messages.
  // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load
  //    balances traffic to them without additional health checking. It may
  //    use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection
  //    failed to a particular endpoint to account for health status propagation
  //    delay between HDS and EDS).
  // By default, can_healthcheck is true. If can_healthcheck is false, Cluster
  // configuration may not contain HealthCheck message.
  // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above
  // invariant?
  // TODO(htuch): Add @amb67's diagram.
  rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse)
      returns (stream HealthCheckSpecifier) {
  }

  // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of
  // request/response. Should we add an identifier to the HealthCheckSpecifier
  // to bind with the response?
  rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse)
      returns (HealthCheckSpecifier) {
    option (google.api.http).post = "/v3/discovery:health_check";
    option (google.api.http).body = "*";
  }
}

// Defines supported protocols etc, so the management server can assign proper
// endpoints to healthcheck.
message Capability {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.discovery.v2.Capability";

  // Different Envoy instances may have different capabilities (e.g. Redis)
  // and/or have ports enabled for different protocols.
  enum Protocol {
    HTTP = 0;
    TCP = 1;
    REDIS = 2;
  }

  repeated Protocol health_check_protocols = 1;
}

message HealthCheckRequest {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.discovery.v2.HealthCheckRequest";

  config.core.v3.Node node = 1;

  Capability capability = 2;
}

message EndpointHealth {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.discovery.v2.EndpointHealth";

  config.endpoint.v3.Endpoint endpoint = 1;

  config.core.v3.HealthStatus health_status = 2;
}

// Group endpoint health by locality under each cluster.
message LocalityEndpointsHealth {
  config.core.v3.Locality locality = 1;

  repeated EndpointHealth endpoints_health = 2;
}

// The health status of endpoints in a cluster. The cluster name and locality
// should match the corresponding fields in ClusterHealthCheck message.
message ClusterEndpointsHealth {
  string cluster_name = 1;

  repeated LocalityEndpointsHealth locality_endpoints_health = 2;
}

message EndpointHealthResponse {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.discovery.v2.EndpointHealthResponse";

  // Deprecated - Flat list of endpoint health information.
  repeated EndpointHealth endpoints_health = 1 [deprecated = true];

  // Organize Endpoint health information by cluster.
  repeated ClusterEndpointsHealth cluster_endpoints_health = 2;
}

message HealthCheckRequestOrEndpointHealthResponse {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.discovery.v2.HealthCheckRequestOrEndpointHealthResponse";

  oneof request_type {
    HealthCheckRequest health_check_request = 1;

    EndpointHealthResponse endpoint_health_response = 2;
  }
}

message LocalityEndpoints {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.discovery.v2.LocalityEndpoints";

  config.core.v3.Locality locality = 1;

  repeated config.endpoint.v3.Endpoint endpoints = 2;
}

// The cluster name and locality is provided to Envoy for the endpoints that it
// health checks to support statistics reporting, logging and debugging by the
// Envoy instance (outside of HDS). For maximum usefulness, it should match the
// same cluster structure as that provided by EDS.
message ClusterHealthCheck {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.discovery.v2.ClusterHealthCheck";

  string cluster_name = 1;

  repeated config.core.v3.HealthCheck health_checks = 2;

  repeated LocalityEndpoints locality_endpoints = 3;

  // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria `
  // on connection when health checking. For more details, see
  // :ref:`config.cluster.v3.Cluster.transport_socket_matches `.
  repeated config.cluster.v3.Cluster.TransportSocketMatch transport_socket_matches = 4;
}

message HealthCheckSpecifier {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.discovery.v2.HealthCheckSpecifier";

  repeated ClusterHealthCheck cluster_health_checks = 1;

  // The default is 1 second.
  google.protobuf.Duration interval = 2;
}

================================================
FILE: api/envoy/service/health/v4alpha/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    has_services = True,
    deps = [
        "//envoy/config/cluster/v4alpha:pkg",
        "//envoy/config/core/v4alpha:pkg",
        "//envoy/config/endpoint/v3:pkg",
        "//envoy/service/health/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/service/health/v4alpha/hds.proto
================================================
syntax = "proto3";

package envoy.service.health.v4alpha;

import "envoy/config/cluster/v4alpha/cluster.proto";
import "envoy/config/core/v4alpha/base.proto";
import "envoy/config/core/v4alpha/health_check.proto";
import "envoy/config/endpoint/v3/endpoint_components.proto";

import "google/api/annotations.proto";
import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.service.health.v4alpha";
option java_outer_classname = "HdsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE;

// [#protodoc-title: Health Discovery Service (HDS)]

// HDS is Health Discovery Service. It complements Envoy’s health checking
// service by designating this Envoy to be a healthchecker for a subset of hosts
// in the cluster. The status of these health checks will be reported to the
// management server, where it can be aggregated etc and redistributed back to
// Envoy through EDS.
service HealthDiscoveryService {
  // 1. Envoy starts up and if its can_healthcheck option in the static
  //    bootstrap config is enabled, sends HealthCheckRequest to the management
  //    server. It supplies its capabilities (which protocol it can health check
  //    with, what zone it resides in, etc.).
  // 2. In response to (1), the management server designates this Envoy as a
  //    healthchecker to health check a subset of all upstream hosts for a given
  //    cluster (for example upstream Host 1 and Host 2). It streams
  //    HealthCheckSpecifier messages with cluster related configuration for all
  //    clusters this Envoy is designated to health check. Subsequent
  //    HealthCheckSpecifier message will be sent on changes to:
  //    a. Endpoints to health checks
  //    b. Per cluster configuration change
  // 3. Envoy creates a health probe based on the HealthCheck config and sends
  //    it to endpoint(ip:port) of Host 1 and 2. Based on the HealthCheck
  //    configuration Envoy waits upon the arrival of the probe response and
  //    looks at the content of the response to decide whether the endpoint is
  //    healthy or not. If a response hasn't been received within the timeout
  //    interval, the endpoint health status is considered TIMEOUT.
  // 4. Envoy reports results back in an EndpointHealthResponse message.
  //    Envoy streams responses as often as the interval configured by the
  //    management server in HealthCheckSpecifier.
  // 5. The management Server collects health statuses for all endpoints in the
  //    cluster (for all clusters) and uses this information to construct
  //    EndpointDiscoveryResponse messages.
  // 6. Once Envoy has a list of upstream endpoints to send traffic to, it load
  //    balances traffic to them without additional health checking. It may
  //    use inline healthcheck (i.e. consider endpoint UNHEALTHY if connection
  //    failed to a particular endpoint to account for health status propagation
  //    delay between HDS and EDS).
  // By default, can_healthcheck is true. If can_healthcheck is false, Cluster
  // configuration may not contain HealthCheck message.
  // TODO(htuch): How is can_healthcheck communicated to CDS to ensure the above
  // invariant?
  // TODO(htuch): Add @amb67's diagram.
  rpc StreamHealthCheck(stream HealthCheckRequestOrEndpointHealthResponse)
      returns (stream HealthCheckSpecifier) {
  }

  // TODO(htuch): Unlike the gRPC version, there is no stream-based binding of
  // request/response. Should we add an identifier to the HealthCheckSpecifier
  // to bind with the response?
  rpc FetchHealthCheck(HealthCheckRequestOrEndpointHealthResponse)
      returns (HealthCheckSpecifier) {
    option (google.api.http).post = "/v3/discovery:health_check";
    option (google.api.http).body = "*";
  }
}

// Defines supported protocols etc, so the management server can assign proper
// endpoints to healthcheck.
message Capability {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.Capability";

  // Different Envoy instances may have different capabilities (e.g. Redis)
  // and/or have ports enabled for different protocols.
  enum Protocol {
    HTTP = 0;
    TCP = 1;
    REDIS = 2;
  }

  repeated Protocol health_check_protocols = 1;
}

message HealthCheckRequest {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.HealthCheckRequest";

  config.core.v4alpha.Node node = 1;

  Capability capability = 2;
}

message EndpointHealth {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.EndpointHealth";

  config.endpoint.v3.Endpoint endpoint = 1;

  config.core.v4alpha.HealthStatus health_status = 2;
}

// Group endpoint health by locality under each cluster.
message LocalityEndpointsHealth {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.LocalityEndpointsHealth";

  config.core.v4alpha.Locality locality = 1;

  repeated EndpointHealth endpoints_health = 2;
}

// The health status of endpoints in a cluster. The cluster name and locality
// should match the corresponding fields in ClusterHealthCheck message.
message ClusterEndpointsHealth {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.ClusterEndpointsHealth";

  string cluster_name = 1;

  repeated LocalityEndpointsHealth locality_endpoints_health = 2;
}

message EndpointHealthResponse {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.EndpointHealthResponse";

  reserved 1;

  reserved "endpoints_health";

  // Organize Endpoint health information by cluster.
  repeated ClusterEndpointsHealth cluster_endpoints_health = 2;
}

message HealthCheckRequestOrEndpointHealthResponse {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.HealthCheckRequestOrEndpointHealthResponse";

  oneof request_type {
    HealthCheckRequest health_check_request = 1;

    EndpointHealthResponse endpoint_health_response = 2;
  }
}

message LocalityEndpoints {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.LocalityEndpoints";

  config.core.v4alpha.Locality locality = 1;

  repeated config.endpoint.v3.Endpoint endpoints = 2;
}

// The cluster name and locality is provided to Envoy for the endpoints that it
// health checks to support statistics reporting, logging and debugging by the
// Envoy instance (outside of HDS). For maximum usefulness, it should match the
// same cluster structure as that provided by EDS.
message ClusterHealthCheck {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.ClusterHealthCheck";

  string cluster_name = 1;

  repeated config.core.v4alpha.HealthCheck health_checks = 2;

  repeated LocalityEndpoints locality_endpoints = 3;

  // Optional map that gets filtered by :ref:`health_checks.transport_socket_match_criteria `
  // on connection when health checking. For more details, see
  // :ref:`config.cluster.v3.Cluster.transport_socket_matches `.
  repeated config.cluster.v4alpha.Cluster.TransportSocketMatch transport_socket_matches = 4;
}

message HealthCheckSpecifier {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.health.v3.HealthCheckSpecifier";

  repeated ClusterHealthCheck cluster_health_checks = 1;

  // The default is 1 second.
  google.protobuf.Duration interval = 2;
}

================================================
FILE: api/envoy/service/listener/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    has_services = True,
    deps = [
        "//envoy/annotations:pkg",
        "//envoy/api/v2:pkg",
        "//envoy/service/discovery/v3:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/service/listener/v3/lds.proto
================================================
syntax = "proto3";

package envoy.service.listener.v3;

import "envoy/service/discovery/v3/discovery.proto";

import "google/api/annotations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/wrappers.proto";

import "envoy/annotations/resource.proto";
import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.service.listener.v3";
option java_outer_classname = "LdsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Listener]
// Listener :ref:`configuration overview `

// The Envoy instance initiates an RPC at startup to discover a list of
// listeners. Updates are delivered via streaming from the LDS server and
// consist of a complete update of all listeners. Existing connections will be
// allowed to drain from listeners that are no longer present.
service ListenerDiscoveryService {
  option (envoy.annotations.resource).type = "envoy.config.listener.v3.Listener";

  rpc DeltaListeners(stream discovery.v3.DeltaDiscoveryRequest)
      returns (stream discovery.v3.DeltaDiscoveryResponse) {
  }

  rpc StreamListeners(stream discovery.v3.DiscoveryRequest)
      returns (stream discovery.v3.DiscoveryResponse) {
  }

  rpc FetchListeners(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) {
    option (google.api.http).post = "/v3/discovery:listeners";
    option (google.api.http).body = "*";
  }
}

// [#not-implemented-hide:] Not configuration. Workaround C++ protobuf issue with importing
// services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file.
message LdsDummy {
  option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.LdsDummy";
}

================================================
FILE: api/envoy/service/load_stats/v2/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    has_services = True,
    deps = [
        "//envoy/api/v2/core:pkg",
        "//envoy/api/v2/endpoint:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/service/load_stats/v2/lrs.proto
================================================
syntax = "proto3";

package envoy.service.load_stats.v2;

import "envoy/api/v2/core/base.proto";
import "envoy/api/v2/endpoint/load_report.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.service.load_stats.v2";
option java_outer_classname = "LrsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = FROZEN;

// [#protodoc-title: Load reporting service]

service LoadReportingService {
  // Advanced API to allow for multi-dimensional load balancing by remote
  // server. For receiving LB assignments, the steps are:
  // 1. The management server is configured with per cluster/zone/load metric
  //    capacity configuration. The capacity configuration definition is
  //    outside of the scope of this document.
  // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters
  //    to balance.
  //
  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a
  // management server:
  // 1. Once a connection establishes, the management server publishes a
  //    LoadStatsResponse for all clusters it is interested in learning load
  //    stats about.
  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts
  //    based on per-zone weights and/or per-instance weights (if specified)
  //    based on intra-zone LbPolicy. This information comes from the above
  //    {Stream,Fetch}Endpoints.
  // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats.
  // 4. Envoy aggregates load reports over the period of time given to it in
  //    LoadStatsResponse.load_reporting_interval. This includes aggregation
  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as
  //    well as load metrics from upstream hosts.
  // 5. When the timer of load_reporting_interval expires, Envoy sends new
  //    LoadStatsRequest filled with load reports for each cluster.
  // 6. The management server uses the load reports from all reported Envoys
  //    from around the world, computes global assignment and prepares traffic
  //    assignment destined for each zone Envoys are located in. Goto 2.
  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {
  }
}

// A load report Envoy sends to the management server.
// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
message LoadStatsRequest {
  // Node identifier for Envoy instance.
  api.v2.core.Node node = 1;

  // A list of load stats to report.
  repeated api.v2.endpoint.ClusterStats cluster_stats = 2;
}

// The management server sends envoy a LoadStatsResponse with all clusters it
// is interested in learning load stats about.
// [#not-implemented-hide:] Not configuration. TBD how to doc proto APIs.
message LoadStatsResponse {
  // Clusters to report stats for.
  // Not populated if *send_all_clusters* is true.
  repeated string clusters = 1;

  // If true, the client should send all clusters it knows about.
  // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their
  // :ref:`client_features` field will honor this field.
  bool send_all_clusters = 4;

  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:
  // 1. There may be some delay from when the timer fires until stats sampling occurs.
  // 2. For clusters that were already featured in the previous *LoadStatsResponse*, any traffic
  //    that is observed in between the corresponding previous *LoadStatsRequest* and this
  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period
  //    of inobservability that might otherwise exist between the messages. New clusters are not
  //    subject to this consideration.
  google.protobuf.Duration load_reporting_interval = 2;

  // Set to *true* if the management server supports endpoint granularity
  // report.
  bool report_endpoint_granularity = 3;
}

================================================
FILE: api/envoy/service/load_stats/v3/BUILD
================================================
# DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py.

load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package")

licenses(["notice"])  # Apache 2

api_proto_package(
    has_services = True,
    deps = [
        "//envoy/config/core/v3:pkg",
        "//envoy/config/endpoint/v3:pkg",
        "//envoy/service/load_stats/v2:pkg",
        "@com_github_cncf_udpa//udpa/annotations:pkg",
    ],
)

================================================
FILE: api/envoy/service/load_stats/v3/lrs.proto
================================================
syntax = "proto3";

package envoy.service.load_stats.v3;

import "envoy/config/core/v3/base.proto";
import "envoy/config/endpoint/v3/load_report.proto";

import "google/protobuf/duration.proto";

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";
import "validate/validate.proto";

option java_package = "io.envoyproxy.envoy.service.load_stats.v3";
option java_outer_classname = "LrsProto";
option java_multiple_files = true;
option java_generic_services = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Load Reporting service (LRS)]

// Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional
// stream with a management server. Upon connecting, the management server can send a
// :ref:`LoadStatsResponse ` to a node it is
// interested in getting the load reports for. Envoy in this node will start sending
// :ref:`LoadStatsRequest `. This is done periodically
// based on the :ref:`load reporting interval `
// For details, take a look at the :ref:`Load Reporting Service sandbox example `.
service LoadReportingService {
  // Advanced API to allow for multi-dimensional load balancing by remote
  // server. For receiving LB assignments, the steps are:
  // 1. The management server is configured with per cluster/zone/load metric
  //    capacity configuration. The capacity configuration definition is
  //    outside of the scope of this document.
  // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters
  //    to balance.
  //
  // Independently, Envoy will initiate a StreamLoadStats bidi stream with a
  // management server:
  // 1. Once a connection establishes, the management server publishes a
  //    LoadStatsResponse for all clusters it is interested in learning load
  //    stats about.
  // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts
  //    based on per-zone weights and/or per-instance weights (if specified)
  //    based on intra-zone LbPolicy. This information comes from the above
  //    {Stream,Fetch}Endpoints.
  // 3. When upstream hosts reply, they optionally add header with ASCII representation of EndpointLoadMetricStats.
  // 4. Envoy aggregates load reports over the period of time given to it in
  //    LoadStatsResponse.load_reporting_interval. This includes aggregation
  //    stats Envoy maintains by itself (total_requests, rpc_errors etc.) as
  //    well as load metrics from upstream hosts.
  // 5. When the timer of load_reporting_interval expires, Envoy sends new
  //    LoadStatsRequest filled with load reports for each cluster.
  // 6. The management server uses the load reports from all reported Envoys
  //    from around the world, computes global assignment and prepares traffic
  //    assignment destined for each zone Envoys are located in. Goto 2.
  rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) {
  }
}

// A load report Envoy sends to the management server.
message LoadStatsRequest {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.load_stats.v2.LoadStatsRequest";

  // Node identifier for Envoy instance.
  config.core.v3.Node node = 1;

  // A list of load stats to report.
  repeated config.endpoint.v3.ClusterStats cluster_stats = 2;
}

// The management server sends envoy a LoadStatsResponse with all clusters it
// is interested in learning load stats about.
message LoadStatsResponse {
  option (udpa.annotations.versioning).previous_message_type =
      "envoy.service.load_stats.v2.LoadStatsResponse";

  // Clusters to report stats for.
  // Not populated if *send_all_clusters* is true.
  repeated string clusters = 1;

  // If true, the client should send all clusters it knows about.
  // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their
  // :ref:`client_features` field will honor this field.
  bool send_all_clusters = 4;

  // The minimum interval of time to collect stats over. This is only a minimum for two reasons:
  //
  // 1. There may be some delay from when the timer fires until stats sampling occurs.
  // 2. For clusters that were already featured in the previous *LoadStatsResponse*, any traffic
  //    that is observed in between the corresponding previous *LoadStatsRequest* and this
  //    *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period
  //    of inobservability that might otherwise exist between the messages. New clusters are not
  //    subject to this consideration.
  google.protobuf.Duration load_reporting_interval = 2;

  // Set to *true* if the management server supports endpoint granularity
  // report.
bool report_endpoint_granularity = 3; } ================================================ FILE: api/envoy/service/load_stats/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/service/load_stats/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/load_stats/v4alpha/lrs.proto ================================================ syntax = "proto3"; package envoy.service.load_stats.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/config/endpoint/v3/load_report.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.load_stats.v4alpha"; option java_outer_classname = "LrsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Load Reporting service (LRS)] // Load Reporting Service is an Envoy API to emit load reports. Envoy will initiate a bi-directional // stream with a management server. Upon connecting, the management server can send a // :ref:`LoadStatsResponse ` to a node it is // interested in getting the load reports for. Envoy in this node will start sending // :ref:`LoadStatsRequest `. This is done periodically // based on the :ref:`load reporting interval ` // For details, take a look at the :ref:`Load Reporting Service sandbox example `. service LoadReportingService { // Advanced API to allow for multi-dimensional load balancing by remote // server. 
For receiving LB assignments, the steps are: // 1. The management server is configured with per cluster/zone/load metric // capacity configuration. The capacity configuration definition is // outside of the scope of this document. // 2. Envoy issues a standard {Stream,Fetch}Endpoints request for the clusters // to balance. // // Independently, Envoy will initiate a StreamLoadStats bidi stream with a // management server: // 1. Once a connection establishes, the management server publishes a // LoadStatsResponse for all clusters it is interested in learning load // stats about. // 2. For each cluster, Envoy load balances incoming traffic to upstream hosts // based on per-zone weights and/or per-instance weights (if specified) // based on intra-zone LbPolicy. This information comes from the above // {Stream,Fetch}Endpoints. // 3. When upstream hosts reply, they optionally add a header with ASCII representation of EndpointLoadMetricStats. // 4. Envoy aggregates load reports over the period of time given to it in // LoadStatsResponse.load_reporting_interval. This includes aggregation // stats Envoy maintains by itself (total_requests, rpc_errors etc.) as // well as load metrics from upstream hosts. // 5. When the timer of load_reporting_interval expires, Envoy sends new // LoadStatsRequest filled with load reports for each cluster. // 6. The management server uses the load reports from all reported Envoys // from around the world, computes global assignment and prepares traffic // assignment destined for each zone Envoys are located in. Goto 2. rpc StreamLoadStats(stream LoadStatsRequest) returns (stream LoadStatsResponse) { } } // A load report Envoy sends to the management server. message LoadStatsRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v3.LoadStatsRequest"; // Node identifier for Envoy instance. config.core.v4alpha.Node node = 1; // A list of load stats to report. 
repeated config.endpoint.v3.ClusterStats cluster_stats = 2; } // The management server sends envoy a LoadStatsResponse with all clusters it // is interested in learning load stats about. message LoadStatsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.load_stats.v3.LoadStatsResponse"; // Clusters to report stats for. // Not populated if *send_all_clusters* is true. repeated string clusters = 1; // If true, the client should send all clusters it knows about. // Only clients that advertise the "envoy.lrs.supports_send_all_clusters" capability in their // :ref:`client_features` field will honor this field. bool send_all_clusters = 4; // The minimum interval of time to collect stats over. This is only a minimum for two reasons: // // 1. There may be some delay from when the timer fires until stats sampling occurs. // 2. For clusters that were already featured in the previous *LoadStatsResponse*, any traffic // that is observed in between the corresponding previous *LoadStatsRequest* and this // *LoadStatsResponse* will also be accumulated and billed to the cluster. This avoids a period // of inobservability that might otherwise exist between the messages. New clusters are not // subject to this consideration. google.protobuf.Duration load_reporting_interval = 2; // Set to *true* if the management server supports endpoint granularity // report. bool report_endpoint_granularity = 3; } ================================================ FILE: api/envoy/service/metrics/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@prometheus_metrics_model//:client_model", ], ) ================================================ FILE: api/envoy/service/metrics/v2/metrics_service.proto ================================================ syntax = "proto3"; package envoy.service.metrics.v2; import "envoy/api/v2/core/base.proto"; import "metrics.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.metrics.v2"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metrics service] // Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric // data model as a standard to represent metrics information. service MetricsService { // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any // response to be sent as nothing would be done in the case of failure. rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { } } message StreamMetricsResponse { } message StreamMetricsMessage { message Identifier { // The node sending metrics over the stream. api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; } // Identifier data effectively is a structured metadata. As a performance optimization this will // only be sent in the first message on the stream. Identifier identifier = 1; // A list of metric entries repeated io.prometheus.client.MetricFamily envoy_metrics = 2; } ================================================ FILE: api/envoy/service/metrics/v3/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", "//envoy/service/metrics/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@prometheus_metrics_model//:client_model", ], ) ================================================ FILE: api/envoy/service/metrics/v3/metrics_service.proto ================================================ syntax = "proto3"; package envoy.service.metrics.v3; import "envoy/config/core/v3/base.proto"; import "metrics.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.metrics.v3"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metrics service] // Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric // data model as a standard to represent metrics information. service MetricsService { // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any // response to be sent as nothing would be done in the case of failure. rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { } } message StreamMetricsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.metrics.v2.StreamMetricsResponse"; } message StreamMetricsMessage { option (udpa.annotations.versioning).previous_message_type = "envoy.service.metrics.v2.StreamMetricsMessage"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.metrics.v2.StreamMetricsMessage.Identifier"; // The node sending metrics over the stream. 
config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; } // Identifier data effectively is a structured metadata. As a performance optimization this will // only be sent in the first message on the stream. Identifier identifier = 1; // A list of metric entries repeated io.prometheus.client.MetricFamily envoy_metrics = 2; } ================================================ FILE: api/envoy/service/metrics/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/service/metrics/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@prometheus_metrics_model//:client_model", ], ) ================================================ FILE: api/envoy/service/metrics/v4alpha/metrics_service.proto ================================================ syntax = "proto3"; package envoy.service.metrics.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "metrics.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.metrics.v4alpha"; option java_outer_classname = "MetricsServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Metrics service] // Service for streaming metrics to server that consumes the metrics data. It uses Prometheus metric // data model as a standard to represent metrics information. service MetricsService { // Envoy will connect and send StreamMetricsMessage messages forever. It does not expect any // response to be sent as nothing would be done in the case of failure. 
rpc StreamMetrics(stream StreamMetricsMessage) returns (StreamMetricsResponse) { } } message StreamMetricsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.metrics.v3.StreamMetricsResponse"; } message StreamMetricsMessage { option (udpa.annotations.versioning).previous_message_type = "envoy.service.metrics.v3.StreamMetricsMessage"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.metrics.v3.StreamMetricsMessage.Identifier"; // The node sending metrics over the stream. config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; } // Identifier data effectively is a structured metadata. As a performance optimization this will // only be sent in the first message on the stream. Identifier identifier = 1; // A list of metric entries repeated io.prometheus.client.MetricFamily envoy_metrics = 2; } ================================================ FILE: api/envoy/service/ratelimit/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", "//envoy/api/v2/ratelimit:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/ratelimit/v2/rls.proto ================================================ syntax = "proto3"; package envoy.service.ratelimit.v2; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/ratelimit/ratelimit.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.ratelimit.v2"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Rate Limit Service (RLS)] service RateLimitService { // Determine whether rate limiting should take place. rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) { } } // Main message for a rate limit request. The rate limit service is designed to be fully generic // in the sense that it can operate on arbitrary hierarchical key/value pairs. The loaded // configuration will parse the request and find the most specific limit to apply. In addition, // a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors // are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any // of them are over limit. This enables more complex application level rate limiting scenarios // if desired. message RateLimitRequest { // All rate limit requests must specify a domain. This enables the configuration to be per // application without fear of overlap. E.g., "envoy". string domain = 1; // All rate limit requests must specify at least one RateLimitDescriptor. 
Each descriptor is // processed by the service (see below). If any of the descriptors are over limit, the entire // request is considered to be over limit. repeated api.v2.ratelimit.RateLimitDescriptor descriptors = 2; // Rate limit requests can optionally specify the number of hits a request adds to the matched // limit. If the value is not set in the message, a request increases the matched limit by 1. uint32 hits_addend = 3; } // A response from a ShouldRateLimit call. message RateLimitResponse { enum Code { // The response code is not known. UNKNOWN = 0; // The response code to notify that the number of requests are under limit. OK = 1; // The response code to notify that the number of requests are over limit. OVER_LIMIT = 2; } // Defines an actual rate limit in terms of requests per unit of time and the unit itself. message RateLimit { enum Unit { // The time unit is not known. UNKNOWN = 0; // The time unit representing a second. SECOND = 1; // The time unit representing a minute. MINUTE = 2; // The time unit representing an hour. HOUR = 3; // The time unit representing a day. DAY = 4; } // A name or description of this limit. string name = 3; // The number of requests per unit of time. uint32 requests_per_unit = 1; // The unit of time. Unit unit = 2; } message DescriptorStatus { // The response code for an individual descriptor. Code code = 1; // The current limit as configured by the server. Useful for debugging, etc. RateLimit current_limit = 2; // The limit remaining in the current time unit. uint32 limit_remaining = 3; } // The overall response code which takes into account all of the descriptors that were passed // in the RateLimitRequest message. Code overall_code = 1; // A list of DescriptorStatus messages which matches the length of the descriptor list passed // in the RateLimitRequest. This can be used by the caller to determine which individual // descriptors failed and/or what the currently configured limits are for all of them. 
repeated DescriptorStatus statuses = 2; // A list of headers to add to the response repeated api.v2.core.HeaderValue headers = 3 [(udpa.annotations.field_migrate).rename = "response_headers_to_add"]; // A list of headers to add to the request when forwarded repeated api.v2.core.HeaderValue request_headers_to_add = 4; } ================================================ FILE: api/envoy/service/ratelimit/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/service/ratelimit/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/ratelimit/v3/rls.proto ================================================ syntax = "proto3"; package envoy.service.ratelimit.v3; import "envoy/config/core/v3/base.proto"; import "envoy/extensions/common/ratelimit/v3/ratelimit.proto"; import "google/protobuf/duration.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.ratelimit.v3"; option java_outer_classname = "RlsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Rate Limit Service (RLS)] service RateLimitService { // Determine whether rate limiting should take place. rpc ShouldRateLimit(RateLimitRequest) returns (RateLimitResponse) { } } // Main message for a rate limit request. The rate limit service is designed to be fully generic // in the sense that it can operate on arbitrary hierarchical key/value pairs. 
The loaded // configuration will parse the request and find the most specific limit to apply. In addition, // a RateLimitRequest can contain multiple "descriptors" to limit on. When multiple descriptors // are provided, the server will limit on *ALL* of them and return an OVER_LIMIT response if any // of them are over limit. This enables more complex application level rate limiting scenarios // if desired. message RateLimitRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitRequest"; // All rate limit requests must specify a domain. This enables the configuration to be per // application without fear of overlap. E.g., "envoy". string domain = 1; // All rate limit requests must specify at least one RateLimitDescriptor. Each descriptor is // processed by the service (see below). If any of the descriptors are over limit, the entire // request is considered to be over limit. repeated envoy.extensions.common.ratelimit.v3.RateLimitDescriptor descriptors = 2; // Rate limit requests can optionally specify the number of hits a request adds to the matched // limit. If the value is not set in the message, a request increases the matched limit by 1. uint32 hits_addend = 3; } // A response from a ShouldRateLimit call. message RateLimitResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitResponse"; enum Code { // The response code is not known. UNKNOWN = 0; // The response code to notify that the number of requests are under limit. OK = 1; // The response code to notify that the number of requests are over limit. OVER_LIMIT = 2; } // Defines an actual rate limit in terms of requests per unit of time and the unit itself. message RateLimit { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitResponse.RateLimit"; // Identifies the unit of time for rate limit. 
// [#comment: replace by envoy/type/v3/ratelimit_unit.proto in v4] enum Unit { // The time unit is not known. UNKNOWN = 0; // The time unit representing a second. SECOND = 1; // The time unit representing a minute. MINUTE = 2; // The time unit representing an hour. HOUR = 3; // The time unit representing a day. DAY = 4; } // A name or description of this limit. string name = 3; // The number of requests per unit of time. uint32 requests_per_unit = 1; // The unit of time. Unit unit = 2; } message DescriptorStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.service.ratelimit.v2.RateLimitResponse.DescriptorStatus"; // The response code for an individual descriptor. Code code = 1; // The current limit as configured by the server. Useful for debugging, etc. RateLimit current_limit = 2; // The limit remaining in the current time unit. uint32 limit_remaining = 3; // Duration until reset of the current limit window. google.protobuf.Duration duration_until_reset = 4; } // The overall response code which takes into account all of the descriptors that were passed // in the RateLimitRequest message. Code overall_code = 1; // A list of DescriptorStatus messages which matches the length of the descriptor list passed // in the RateLimitRequest. This can be used by the caller to determine which individual // descriptors failed and/or what the currently configured limits are for all of them. repeated DescriptorStatus statuses = 2; // A list of headers to add to the response repeated config.core.v3.HeaderValue response_headers_to_add = 3; // A list of headers to add to the request when forwarded repeated config.core.v3.HeaderValue request_headers_to_add = 4; } ================================================ FILE: api/envoy/service/route/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/annotations:pkg", "//envoy/api/v2:pkg", "//envoy/service/discovery/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/route/v3/rds.proto ================================================ syntax = "proto3"; package envoy.service.route.v3; import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/wrappers.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.route.v3"; option java_outer_classname = "RdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: RDS] // The resource_names field in DiscoveryRequest specifies a route configuration. // This allows an Envoy configuration with multiple HTTP listeners (and // associated HTTP connection manager filters) to use different route // configurations. Each listener will bind its HTTP connection manager filter to // a route table via this identifier. 
service RouteDiscoveryService { option (envoy.annotations.resource).type = "envoy.config.route.v3.RouteConfiguration"; rpc StreamRoutes(stream discovery.v3.DiscoveryRequest) returns (stream discovery.v3.DiscoveryResponse) { } rpc DeltaRoutes(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } rpc FetchRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { option (google.api.http).post = "/v3/discovery:routes"; option (google.api.http).body = "*"; } } // Virtual Host Discovery Service (VHDS) is used to dynamically update the list of virtual hosts for // a given RouteConfiguration. If VHDS is configured a virtual host list update will be triggered // during the processing of an HTTP request if a route for the request cannot be resolved. The // :ref:`resource_names_subscribe ` // field contains a list of virtual host names or aliases to track. The contents of an alias would // be the contents of a *host* or *authority* header used to make an http request. An xDS server // will match an alias to a virtual host based on the content of :ref:`domains' // ` field. The *resource_names_unsubscribe* field // contains a list of virtual host names that have been :ref:`unsubscribed // ` from the routing table associated with the RouteConfiguration. service VirtualHostDiscoveryService { option (envoy.annotations.resource).type = "envoy.config.route.v3.VirtualHost"; rpc DeltaVirtualHosts(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
message RdsDummy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.RdsDummy"; } ================================================ FILE: api/envoy/service/route/v3/srds.proto ================================================ syntax = "proto3"; package envoy.service.route.v3; import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.route.v3"; option java_outer_classname = "SrdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: SRDS] // * Routing :ref:`architecture overview ` // The Scoped Routes Discovery Service (SRDS) API distributes // :ref:`ScopedRouteConfiguration` // resources. Each ScopedRouteConfiguration resource represents a "routing // scope" containing a mapping that allows the HTTP connection manager to // dynamically assign a routing table (specified via a // :ref:`RouteConfiguration` message) to each // HTTP request. service ScopedRoutesDiscoveryService { option (envoy.annotations.resource).type = "envoy.config.route.v3.ScopedRouteConfiguration"; rpc StreamScopedRoutes(stream discovery.v3.DiscoveryRequest) returns (stream discovery.v3.DiscoveryResponse) { } rpc DeltaScopedRoutes(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } rpc FetchScopedRoutes(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { option (google.api.http).post = "/v3/discovery:scoped-routes"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
message SrdsDummy { option (udpa.annotations.versioning).previous_message_type = "envoy.api.v2.SrdsDummy"; } ================================================ FILE: api/envoy/service/runtime/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/annotations:pkg", "//envoy/service/discovery/v2:pkg", "//envoy/service/discovery/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/runtime/v3/rtds.proto ================================================ syntax = "proto3"; package envoy.service.runtime.v3; import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.runtime.v3"; option java_outer_classname = "RtdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Runtime Discovery Service (RTDS)] // RTDS :ref:`configuration overview ` // Discovery service for Runtime resources. 
service RuntimeDiscoveryService { option (envoy.annotations.resource).type = "envoy.service.runtime.v3.Runtime"; rpc StreamRuntime(stream discovery.v3.DiscoveryRequest) returns (stream discovery.v3.DiscoveryResponse) { } rpc DeltaRuntime(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } rpc FetchRuntime(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { option (google.api.http).post = "/v3/discovery:runtime"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 message RtdsDummy { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.RtdsDummy"; } // RTDS resource type. This describes a layer in the runtime virtual filesystem. message Runtime { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.Runtime"; // Runtime resource name. This makes the Runtime a self-describing xDS // resource. string name = 1 [(validate.rules).string = {min_len: 1}]; google.protobuf.Struct layer = 2; } ================================================ FILE: api/envoy/service/secret/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/annotations:pkg", "//envoy/service/discovery/v2:pkg", "//envoy/service/discovery/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/secret/v3/sds.proto ================================================ syntax = "proto3"; package envoy.service.secret.v3; import "envoy/service/discovery/v3/discovery.proto"; import "google/api/annotations.proto"; import "envoy/annotations/resource.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.secret.v3"; option java_outer_classname = "SdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Secret Discovery Service (SDS)] service SecretDiscoveryService { option (envoy.annotations.resource).type = "envoy.extensions.transport_sockets.tls.v3.Secret"; rpc DeltaSecrets(stream discovery.v3.DeltaDiscoveryRequest) returns (stream discovery.v3.DeltaDiscoveryResponse) { } rpc StreamSecrets(stream discovery.v3.DiscoveryRequest) returns (stream discovery.v3.DiscoveryResponse) { } rpc FetchSecrets(discovery.v3.DiscoveryRequest) returns (discovery.v3.DiscoveryResponse) { option (google.api.http).post = "/v3/discovery:secrets"; option (google.api.http).body = "*"; } } // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 message SdsDummy { option (udpa.annotations.versioning).previous_message_type = "envoy.service.discovery.v2.SdsDummy"; } ================================================ FILE: api/envoy/service/status/v2/BUILD ================================================ # DO NOT EDIT. 
This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/admin/v2alpha:pkg", "//envoy/api/v2/core:pkg", "//envoy/type/matcher:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/status/v2/csds.proto ================================================ syntax = "proto3"; package envoy.service.status.v2; import "envoy/admin/v2alpha/config_dump.proto"; import "envoy/api/v2/core/base.proto"; import "envoy/type/matcher/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.service.status.v2"; option java_outer_classname = "CsdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Client Status Discovery Service (CSDS)] // CSDS is Client Status Discovery Service. It can be used to get the status of // an xDS-compliant client from the management server's point of view. In the // future, it can potentially be used as an interface to get the current // state directly from the client. service ClientStatusDiscoveryService { rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { } rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { option (google.api.http).post = "/v2/discovery:client_status"; option (google.api.http).body = "*"; } } // Status of a config. enum ConfigStatus { // Status info is not available/unknown. UNKNOWN = 0; // Management server has sent the config to client and received ACK. SYNCED = 1; // Config is not sent. NOT_SENT = 2; // Management server has sent the config to client but hasn’t received // ACK/NACK. 
STALE = 3; // Management server has sent the config to client but received NACK. ERROR = 4; } // Request for client status of clients identified by a list of NodeMatchers. message ClientStatusRequest { // Management server can use these match criteria to identify clients. // The match follows OR semantics. repeated type.matcher.NodeMatcher node_matchers = 1; } // Detailed config (per xDS) with status. // [#next-free-field: 6] message PerXdsConfig { ConfigStatus status = 1; oneof per_xds_config { admin.v2alpha.ListenersConfigDump listener_config = 2; admin.v2alpha.ClustersConfigDump cluster_config = 3; admin.v2alpha.RoutesConfigDump route_config = 4; admin.v2alpha.ScopedRoutesConfigDump scoped_route_config = 5; } } // All xds configs for a particular client. message ClientConfig { // Node for a particular client. api.v2.core.Node node = 1; repeated PerXdsConfig xds_config = 2; } message ClientStatusResponse { // Client configs for the clients specified in the ClientStatusRequest. repeated ClientConfig config = 1; } ================================================ FILE: api/envoy/service/status/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/admin/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/service/status/v2:pkg", "//envoy/type/matcher/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/status/v3/csds.proto ================================================ syntax = "proto3"; package envoy.service.status.v3; import "envoy/admin/v3/config_dump.proto"; import "envoy/config/core/v3/base.proto"; import "envoy/type/matcher/v3/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.status.v3"; option java_outer_classname = "CsdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Client Status Discovery Service (CSDS)] // CSDS is Client Status Discovery Service. It can be used to get the status of // an xDS-compliant client from the management server's point of view. It can // also be used to get the current xDS states directly from the client. service ClientStatusDiscoveryService { rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { } rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { option (google.api.http).post = "/v3/discovery:client_status"; option (google.api.http).body = "*"; } } // Status of a config from a management server view. enum ConfigStatus { // Status info is not available/unknown. UNKNOWN = 0; // Management server has sent the config to client and received ACK. SYNCED = 1; // Config is not sent. 
NOT_SENT = 2; // Management server has sent the config to client but hasn’t received // ACK/NACK. STALE = 3; // Management server has sent the config to client but received NACK. The // attached config dump will be the latest config (the rejected one), since // it is the persisted version in the management server. ERROR = 4; } // Config status from a client-side view. enum ClientConfigStatus { // Config status is not available/unknown. CLIENT_UNKNOWN = 0; // Client requested the config but hasn't received any config from management // server yet. CLIENT_REQUESTED = 1; // Client received the config and replied with ACK. CLIENT_ACKED = 2; // Client received the config and replied with NACK. Notably, the attached // config dump is not the NACKed version, but the most recent accepted one. If // no config is accepted yet, the attached config dump will be empty. CLIENT_NACKED = 3; } // Request for client status of clients identified by a list of NodeMatchers. message ClientStatusRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.ClientStatusRequest"; // Management server can use these match criteria to identify clients. // The match follows OR semantics. repeated type.matcher.v3.NodeMatcher node_matchers = 1; // The node making the csds request. config.core.v3.Node node = 2; } // Detailed config (per xDS) with status. // [#next-free-field: 8] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.PerXdsConfig"; // Config status generated by management servers. Will not be present if the // CSDS server is an xDS client. ConfigStatus status = 1 [(udpa.annotations.field_migrate).oneof_promotion = "status_config"]; // Client config status is populated by xDS clients. Will not be present if // the CSDS server is an xDS server. No matter what the client config status // is, xDS clients should always dump the most recent accepted xDS config. 
ClientConfigStatus client_status = 7 [(udpa.annotations.field_migrate).oneof_promotion = "status_config"]; oneof per_xds_config { admin.v3.ListenersConfigDump listener_config = 2; admin.v3.ClustersConfigDump cluster_config = 3; admin.v3.RoutesConfigDump route_config = 4; admin.v3.ScopedRoutesConfigDump scoped_route_config = 5; admin.v3.EndpointsConfigDump endpoint_config = 6; } } // All xds configs for a particular client. message ClientConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.ClientConfig"; // Node for a particular client. config.core.v3.Node node = 1; repeated PerXdsConfig xds_config = 2; } message ClientStatusResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v2.ClientStatusResponse"; // Client configs for the clients specified in the ClientStatusRequest. repeated ClientConfig config = 1; } ================================================ FILE: api/envoy/service/status/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/admin/v4alpha:pkg", "//envoy/config/core/v4alpha:pkg", "//envoy/service/status/v3:pkg", "//envoy/type/matcher/v4alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/status/v4alpha/csds.proto ================================================ syntax = "proto3"; package envoy.service.status.v4alpha; import "envoy/admin/v4alpha/config_dump.proto"; import "envoy/config/core/v4alpha/base.proto"; import "envoy/type/matcher/v4alpha/node.proto"; import "google/api/annotations.proto"; import "google/protobuf/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.service.status.v4alpha"; option java_outer_classname = "CsdsProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Client Status Discovery Service (CSDS)] // CSDS is Client Status Discovery Service. It can be used to get the status of // an xDS-compliant client from the management server's point of view. It can // also be used to get the current xDS states directly from the client. service ClientStatusDiscoveryService { rpc StreamClientStatus(stream ClientStatusRequest) returns (stream ClientStatusResponse) { } rpc FetchClientStatus(ClientStatusRequest) returns (ClientStatusResponse) { option (google.api.http).post = "/v3/discovery:client_status"; option (google.api.http).body = "*"; } } // Status of a config from a management server view. enum ConfigStatus { // Status info is not available/unknown. UNKNOWN = 0; // Management server has sent the config to client and received ACK. SYNCED = 1; // Config is not sent. 
NOT_SENT = 2; // Management server has sent the config to client but hasn’t received // ACK/NACK. STALE = 3; // Management server has sent the config to client but received NACK. The // attached config dump will be the latest config (the rejected one), since // it is the persisted version in the management server. ERROR = 4; } // Config status from a client-side view. enum ClientConfigStatus { // Config status is not available/unknown. CLIENT_UNKNOWN = 0; // Client requested the config but hasn't received any config from management // server yet. CLIENT_REQUESTED = 1; // Client received the config and replied with ACK. CLIENT_ACKED = 2; // Client received the config and replied with NACK. Notably, the attached // config dump is not the NACKed version, but the most recent accepted one. If // no config is accepted yet, the attached config dump will be empty. CLIENT_NACKED = 3; } // Request for client status of clients identified by a list of NodeMatchers. message ClientStatusRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v3.ClientStatusRequest"; // Management server can use these match criteria to identify clients. // The match follows OR semantics. repeated type.matcher.v4alpha.NodeMatcher node_matchers = 1; // The node making the csds request. config.core.v4alpha.Node node = 2; } // Detailed config (per xDS) with status. // [#next-free-field: 8] message PerXdsConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v3.PerXdsConfig"; oneof status_config { // Config status generated by management servers. Will not be present if the // CSDS server is an xDS client. ConfigStatus status = 1; // Client config status is populated by xDS clients. Will not be present if // the CSDS server is an xDS server. No matter what the client config status // is, xDS clients should always dump the most recent accepted xDS config. 
ClientConfigStatus client_status = 7; } oneof per_xds_config { admin.v4alpha.ListenersConfigDump listener_config = 2; admin.v4alpha.ClustersConfigDump cluster_config = 3; admin.v4alpha.RoutesConfigDump route_config = 4; admin.v4alpha.ScopedRoutesConfigDump scoped_route_config = 5; admin.v4alpha.EndpointsConfigDump endpoint_config = 6; } } // All xds configs for a particular client. message ClientConfig { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v3.ClientConfig"; // Node for a particular client. config.core.v4alpha.Node node = 1; repeated PerXdsConfig xds_config = 2; } message ClientStatusResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.status.v3.ClientStatusResponse"; // Client configs for the clients specified in the ClientStatusRequest. repeated ClientConfig config = 1; } ================================================ FILE: api/envoy/service/tap/v2alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", "//envoy/api/v2/route:pkg", "//envoy/data/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/tap/v2alpha/common.proto ================================================ syntax = "proto3"; package envoy.service.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/api/v2/core/grpc_service.proto"; import "envoy/api/v2/route/route_components.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; option java_outer_classname = "CommonProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.config.tap.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Common tap configuration] // Tap configuration. message TapConfig { // [#comment:TODO(mattklein123): Rate limiting] // The match configuration. If the configuration matches the data source being tapped, a tap will // occur, with the result written to the configured output. MatchPredicate match_config = 1 [(validate.rules).message = {required: true}]; // The tap output configuration. If a match configuration matches a data source being tapped, // a tap will occur and the data will be written to the configured output. OutputConfig output_config = 2 [(validate.rules).message = {required: true}]; // [#not-implemented-hide:] Specify if Tap matching is enabled. The % of requests\connections for // which the tap matching is enabled. When not enabled, the request\connection will not be // recorded. // // .. note:: // // This field defaults to 100/:ref:`HUNDRED // `. 
api.v2.core.RuntimeFractionalPercent tap_enabled = 3; } // Tap match configuration. This is a recursive structure which allows complex nested match // configurations to be built using various logical operators. // [#next-free-field: 9] message MatchPredicate { // A set of match configurations used for logical operations. message MatchSet { // The list of rules that make up the set. repeated MatchPredicate rules = 1 [(validate.rules).repeated = {min_items: 2}]; } oneof rule { option (validate.required) = true; // A set that describes a logical OR. If any member of the set matches, the match configuration // matches. MatchSet or_match = 1; // A set that describes a logical AND. If all members of the set match, the match configuration // matches. MatchSet and_match = 2; // A negation match. The match configuration will match if the negated match condition matches. MatchPredicate not_match = 3; // The match configuration will always match. bool any_match = 4 [(validate.rules).bool = {const: true}]; // HTTP request headers match configuration. HttpHeadersMatch http_request_headers_match = 5; // HTTP request trailers match configuration. HttpHeadersMatch http_request_trailers_match = 6; // HTTP response headers match configuration. HttpHeadersMatch http_response_headers_match = 7; // HTTP response trailers match configuration. HttpHeadersMatch http_response_trailers_match = 8; } } // HTTP headers match configuration. message HttpHeadersMatch { // HTTP headers to match. repeated api.v2.route.HeaderMatcher headers = 1; } // Tap output configuration. message OutputConfig { // Output sinks for tap data. Currently a single sink is allowed in the list. Once multiple // sink types are supported this constraint will be relaxed. repeated OutputSink sinks = 1 [(validate.rules).repeated = {min_items: 1 max_items: 1}]; // For buffered tapping, the maximum amount of received body that will be buffered prior to // truncation. 
If truncation occurs, the :ref:`truncated // ` field will be set. If not specified, the // default is 1KiB. google.protobuf.UInt32Value max_buffered_rx_bytes = 2; // For buffered tapping, the maximum amount of transmitted body that will be buffered prior to // truncation. If truncation occurs, the :ref:`truncated // ` field will be set. If not specified, the // default is 1KiB. google.protobuf.UInt32Value max_buffered_tx_bytes = 3; // Indicates whether taps produce a single buffered message per tap, or multiple streamed // messages per tap in the emitted :ref:`TraceWrapper // ` messages. Note that streamed tapping does not // mean that no buffering takes place. Buffering may be required if data is processed before a // match can be determined. See the HTTP tap filter :ref:`streaming // ` documentation for more information. bool streaming = 4; } // Tap output sink configuration. message OutputSink { // Output format. All output is in the form of one or more :ref:`TraceWrapper // ` messages. This enumeration indicates // how those messages are written. Note that not all sinks support all output formats. See // individual sink documentation for more information. enum Format { // Each message will be written as JSON. Any :ref:`body ` // data will be present in the :ref:`as_bytes // ` field. This means that body data will be // base64 encoded as per the `proto3 JSON mappings // `_. JSON_BODY_AS_BYTES = 0; // Each message will be written as JSON. Any :ref:`body ` // data will be present in the :ref:`as_string // ` field. This means that body data will be // string encoded as per the `proto3 JSON mappings // `_. This format type is // useful when it is known that that body is human readable (e.g., JSON over HTTP) and the // user wishes to view it directly without being forced to base64 decode the body. JSON_BODY_AS_STRING = 1; // Binary proto format. Note that binary proto is not self-delimiting. 
If a sink writes // multiple binary messages without any length information the data stream will not be // useful. However, for certain sinks that are self-delimiting (e.g., one message per file) // this output format makes consumption simpler. PROTO_BINARY = 2; // Messages are written as a sequence tuples, where each tuple is the message length encoded // as a `protobuf 32-bit varint // `_ // followed by the binary message. The messages can be read back using the language specific // protobuf coded stream implementation to obtain the message length and the message. PROTO_BINARY_LENGTH_DELIMITED = 3; // Text proto format. PROTO_TEXT = 4; } // Sink output format. Format format = 1 [(validate.rules).enum = {defined_only: true}]; oneof output_sink_type { option (validate.required) = true; // Tap output will be streamed out the :http:post:`/tap` admin endpoint. // // .. attention:: // // It is only allowed to specify the streaming admin output sink if the tap is being // configured from the :http:post:`/tap` admin endpoint. Thus, if an extension has // been configured to receive tap configuration from some other source (e.g., static // file, XDS, etc.) configuring the streaming admin output type will fail. StreamingAdminSink streaming_admin = 2; // Tap output will be written to a file per tap sink. FilePerTapSink file_per_tap = 3; // [#not-implemented-hide:] // GrpcService to stream data to. The format argument must be PROTO_BINARY. StreamingGrpcSink streaming_grpc = 4; } } // Streaming admin sink configuration. message StreamingAdminSink { } // The file per tap sink outputs a discrete file for every tapped stream. message FilePerTapSink { // Path prefix. The output file will be of the form _.pb, where is an // identifier distinguishing the recorded trace for stream instances (the Envoy // connection ID, HTTP stream ID, etc.). 
string path_prefix = 1 [(validate.rules).string = {min_bytes: 1}]; } // [#not-implemented-hide:] Streaming gRPC sink configuration sends the taps to an external gRPC // server. message StreamingGrpcSink { // Opaque identifier, that will be sent back to the streaming grpc server. string tap_id = 1; // The gRPC server that hosts the Tap Sink Service. api.v2.core.GrpcService grpc_service = 2 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/service/tap/v2alpha/tap.proto ================================================ syntax = "proto3"; package envoy.service.tap.v2alpha; import "envoy/api/v2/core/base.proto"; import "envoy/data/tap/v2alpha/wrapper.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v2alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Tap Sink Service] // [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call // StreamTaps to deliver captured taps to the server service TapSinkService { // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any // response to be sent as nothing would be done in the case of failure. The server should // disconnect if it expects Envoy to reconnect. rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { } } // [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server // and stream taps without ever expecting a response. message StreamTapsRequest { message Identifier { // The node sending taps over the stream. api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; // The opaque identifier that was set in the :ref:`output config // `. 
string tap_id = 2; } // Identifier data effectively is a structured metadata. As a performance optimization this will // only be sent in the first message on the stream. Identifier identifier = 1; // The trace id. this can be used to merge together a streaming trace. Note that the trace_id // is not guaranteed to be spatially or temporally unique. uint64 trace_id = 2; // The trace data. data.tap.v2alpha.TraceWrapper trace = 3; } // [#not-implemented-hide:] message StreamTapsResponse { } ================================================ FILE: api/envoy/service/tap/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", "//envoy/data/tap/v3:pkg", "//envoy/service/tap/v2alpha:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/tap/v3/tap.proto ================================================ syntax = "proto3"; package envoy.service.tap.v3; import "envoy/config/core/v3/base.proto"; import "envoy/data/tap/v3/wrapper.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v3"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Tap Sink Service] // [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call // StreamTaps to deliver captured taps to the server service TapSinkService { // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any // response to be sent as nothing would be done in the case of failure. 
The server should // disconnect if it expects Envoy to reconnect. rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { } } // [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server // and stream taps without ever expecting a response. message StreamTapsRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.StreamTapsRequest"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.StreamTapsRequest.Identifier"; // The node sending taps over the stream. config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; // The opaque identifier that was set in the :ref:`output config // `. string tap_id = 2; } // Identifier data effectively is a structured metadata. As a performance optimization this will // only be sent in the first message on the stream. Identifier identifier = 1; // The trace id. this can be used to merge together a streaming trace. Note that the trace_id // is not guaranteed to be spatially or temporally unique. uint64 trace_id = 2; // The trace data. data.tap.v3.TraceWrapper trace = 3; } // [#not-implemented-hide:] message StreamTapsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v2alpha.StreamTapsResponse"; } ================================================ FILE: api/envoy/service/tap/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/data/tap/v3:pkg", "//envoy/service/tap/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/service/tap/v4alpha/tap.proto ================================================ syntax = "proto3"; package envoy.service.tap.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "envoy/data/tap/v3/wrapper.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.tap.v4alpha"; option java_outer_classname = "TapProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Tap Sink Service] // [#not-implemented-hide:] A tap service to receive incoming taps. Envoy will call // StreamTaps to deliver captured taps to the server service TapSinkService { // Envoy will connect and send StreamTapsRequest messages forever. It does not expect any // response to be sent as nothing would be done in the case of failure. The server should // disconnect if it expects Envoy to reconnect. rpc StreamTaps(stream StreamTapsRequest) returns (StreamTapsResponse) { } } // [#not-implemented-hide:] Stream message for the Tap API. Envoy will open a stream to the server // and stream taps without ever expecting a response. message StreamTapsRequest { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.StreamTapsRequest"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.StreamTapsRequest.Identifier"; // The node sending taps over the stream. 
config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; // The opaque identifier that was set in the :ref:`output config // `. string tap_id = 2; } // Identifier data effectively is a structured metadata. As a performance optimization this will // only be sent in the first message on the stream. Identifier identifier = 1; // The trace id. this can be used to merge together a streaming trace. Note that the trace_id // is not guaranteed to be spatially or temporally unique. uint64 trace_id = 2; // The trace data. data.tap.v3.TraceWrapper trace = 3; } // [#not-implemented-hide:] message StreamTapsResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.tap.v3.StreamTapsResponse"; } ================================================ FILE: api/envoy/service/trace/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/api/v2/core:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", ], ) ================================================ FILE: api/envoy/service/trace/v2/trace_service.proto ================================================ syntax = "proto3"; package envoy.service.trace.v2; import "envoy/api/v2/core/base.proto"; import "google/api/annotations.proto"; import "opencensus/proto/trace/v1/trace.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.trace.v2"; option java_outer_classname = "TraceServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Trace service] // Service for streaming traces to server that consumes the 
trace data. It // uses OpenCensus data model as a standard to represent trace information. service TraceService { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { } } message StreamTracesResponse { } message StreamTracesMessage { message Identifier { // The node sending the access log messages over the stream. api.v2.core.Node node = 1 [(validate.rules).message = {required: true}]; } // Identifier data effectively is a structured metadata. // As a performance optimization this will only be sent in the first message // on the stream. Identifier identifier = 1; // A list of Span entries repeated opencensus.proto.trace.v1.Span spans = 2; } ================================================ FILE: api/envoy/service/trace/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v3:pkg", "//envoy/service/trace/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", ], ) ================================================ FILE: api/envoy/service/trace/v3/trace_service.proto ================================================ syntax = "proto3"; package envoy.service.trace.v3; import "envoy/config/core/v3/base.proto"; import "google/api/annotations.proto"; import "opencensus/proto/trace/v1/trace.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.trace.v3"; option java_outer_classname = "TraceServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Trace service] // Service for streaming traces to server that consumes the trace data. It // uses OpenCensus data model as a standard to represent trace information. service TraceService { // Envoy will connect and send StreamTracesMessage messages forever. It does // not expect any response to be sent as nothing would be done in the case // of failure. rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { } } message StreamTracesResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.trace.v2.StreamTracesResponse"; } message StreamTracesMessage { option (udpa.annotations.versioning).previous_message_type = "envoy.service.trace.v2.StreamTracesMessage"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.trace.v2.StreamTracesMessage.Identifier"; // The node sending the access log messages over the stream. 
config.core.v3.Node node = 1 [(validate.rules).message = {required: true}]; } // Identifier data effectively is a structured metadata. // As a performance optimization this will only be sent in the first message // on the stream. Identifier identifier = 1; // A list of Span entries repeated opencensus.proto.trace.v1.Span spans = 2; } ================================================ FILE: api/envoy/service/trace/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( has_services = True, deps = [ "//envoy/config/core/v4alpha:pkg", "//envoy/service/trace/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", "@opencensus_proto//opencensus/proto/trace/v1:trace_proto", ], ) ================================================ FILE: api/envoy/service/trace/v4alpha/trace_service.proto ================================================ syntax = "proto3"; package envoy.service.trace.v4alpha; import "envoy/config/core/v4alpha/base.proto"; import "google/api/annotations.proto"; import "opencensus/proto/trace/v1/trace.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.service.trace.v4alpha"; option java_outer_classname = "TraceServiceProto"; option java_multiple_files = true; option java_generic_services = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Trace service] // Service for streaming traces to server that consumes the trace data. It // uses OpenCensus data model as a standard to represent trace information. service TraceService { // Envoy will connect and send StreamTracesMessage messages forever. 
It does // not expect any response to be sent as nothing would be done in the case // of failure. rpc StreamTraces(stream StreamTracesMessage) returns (StreamTracesResponse) { } } message StreamTracesResponse { option (udpa.annotations.versioning).previous_message_type = "envoy.service.trace.v3.StreamTracesResponse"; } message StreamTracesMessage { option (udpa.annotations.versioning).previous_message_type = "envoy.service.trace.v3.StreamTracesMessage"; message Identifier { option (udpa.annotations.versioning).previous_message_type = "envoy.service.trace.v3.StreamTracesMessage.Identifier"; // The node sending the trace messages over the stream. config.core.v4alpha.Node node = 1 [(validate.rules).message = {required: true}]; } // Identifier data effectively is a structured metadata. // As a performance optimization this will only be sent in the first message // on the stream. Identifier identifier = 1; // A list of Span entries repeated opencensus.proto.trace.v1.Span spans = 2; } ================================================ FILE: api/envoy/type/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/type/hash_policy.proto ================================================ syntax = "proto3"; package envoy.type; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HashPolicyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Hash Policy] // Specifies the hash policy message HashPolicy { // The source IP will be used to compute the hash used by hash-based load balancing // algorithms. message SourceIp { } oneof policy_specifier { option (validate.required) = true; SourceIp source_ip = 1; } } ================================================ FILE: api/envoy/type/http.proto ================================================ syntax = "proto3"; package envoy.type; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP] enum CodecClientType { HTTP1 = 0; HTTP2 = 1; // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient // to distinguish HTTP1 and HTTP2 traffic. 
HTTP3 = 2; } ================================================ FILE: api/envoy/type/http_status.proto ================================================ syntax = "proto3"; package envoy.type; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "HttpStatusProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: HTTP status codes] // HTTP response codes supported in Envoy. // For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml enum StatusCode { // Empty - This code not part of the HTTP status code specification, but it is needed for proto // `enum` type. Empty = 0; Continue = 100; OK = 200; Created = 201; Accepted = 202; NonAuthoritativeInformation = 203; NoContent = 204; ResetContent = 205; PartialContent = 206; MultiStatus = 207; AlreadyReported = 208; IMUsed = 226; MultipleChoices = 300; MovedPermanently = 301; Found = 302; SeeOther = 303; NotModified = 304; UseProxy = 305; TemporaryRedirect = 307; PermanentRedirect = 308; BadRequest = 400; Unauthorized = 401; PaymentRequired = 402; Forbidden = 403; NotFound = 404; MethodNotAllowed = 405; NotAcceptable = 406; ProxyAuthenticationRequired = 407; RequestTimeout = 408; Conflict = 409; Gone = 410; LengthRequired = 411; PreconditionFailed = 412; PayloadTooLarge = 413; URITooLong = 414; UnsupportedMediaType = 415; RangeNotSatisfiable = 416; ExpectationFailed = 417; MisdirectedRequest = 421; UnprocessableEntity = 422; Locked = 423; FailedDependency = 424; UpgradeRequired = 426; PreconditionRequired = 428; TooManyRequests = 429; RequestHeaderFieldsTooLarge = 431; InternalServerError = 500; NotImplemented = 501; BadGateway = 502; ServiceUnavailable = 503; GatewayTimeout = 504; HTTPVersionNotSupported = 505; VariantAlsoNegotiates = 506; InsufficientStorage = 507; LoopDetected = 508; NotExtended = 510; 
NetworkAuthenticationRequired = 511; } // HTTP status. message HttpStatus { // Supplies HTTP response code. StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; } ================================================ FILE: api/envoy/type/matcher/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/type/matcher/metadata.proto ================================================ syntax = "proto3"; package envoy.type.matcher; import "envoy/type/matcher/value.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metadata matcher] // MetadataMatcher provides a general interface to check if a given value is matched in // :ref:`Metadata `. It uses `filter` and `path` to retrieve the value // from the Metadata and then check if it's matched to the specified value. // // For example, for the following Metadata: // // .. code-block:: yaml // // filter_metadata: // envoy.filters.http.rbac: // fields: // a: // struct_value: // fields: // b: // struct_value: // fields: // c: // string_value: pro // t: // list_value: // values: // - string_value: m // - string_value: n // // The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" // from the Metadata which is matched to the specified prefix match. // // .. 
code-block:: yaml // // filter: envoy.filters.http.rbac // path: // - key: a // - key: b // - key: c // value: // string_match: // prefix: pr // // The following MetadataMatcher is matched as the code will match one of the string values in the // list at the path [a, t]. // // .. code-block:: yaml // // filter: envoy.filters.http.rbac // path: // - key: a // - key: t // value: // list_match: // one_of: // string_match: // exact: m // // An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to // enforce access control based on dynamic metadata in a request. See :ref:`Permission // ` and :ref:`Principal // `. // [#next-major-version: MetadataMatcher should use StructMatcher] message MetadataMatcher { // Specifies the segment in a path to retrieve value from Metadata. // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that // if the segment key refers to a list, it has to be the last segment in a path. message PathSegment { oneof segment { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; // The MetadataMatcher is matched if the value retrieved by path is matched to this value. 
ValueMatcher value = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/type/matcher/node.proto ================================================ syntax = "proto3"; package envoy.type.matcher; import "envoy/type/matcher/string.proto"; import "envoy/type/matcher/struct.proto"; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "NodeProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Node matcher] // Specifies the way to match a Node. // The match follows AND semantics. message NodeMatcher { // Specifies match criteria on the node id. StringMatcher node_id = 1; // Specifies match criteria on the node metadata. repeated StructMatcher node_metadatas = 2; } ================================================ FILE: api/envoy/type/matcher/number.proto ================================================ syntax = "proto3"; package envoy.type.matcher; import "envoy/type/range.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "NumberProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Number matcher] // Specifies the way to match a double value. message DoubleMatcher { oneof match_pattern { option (validate.required) = true; // If specified, the input double value must be in the range specified here. // Note: The range is using half-open interval semantics [start, end). DoubleRange range = 1; // If specified, the input double value must be equal to the value specified here. 
double exact = 2; } } ================================================ FILE: api/envoy/type/matcher/path.proto ================================================ syntax = "proto3"; package envoy.type.matcher; import "envoy/type/matcher/string.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "PathProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Path matcher] // Specifies the way to match a path on HTTP request. message PathMatcher { oneof rule { option (validate.required) = true; // The `path` must match the URL path portion of the :path header. The query and fragment // string (if present) are removed in the URL path portion. // For example, the path */data* will match the *:path* header */data#fragment?param=value*. StringMatcher path = 1 [(validate.rules).message = {required: true}]; } } ================================================ FILE: api/envoy/type/matcher/regex.proto ================================================ syntax = "proto3"; package envoy.type.matcher; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "RegexProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Regex matcher] // A regex matcher designed for safety when used with untrusted input. message RegexMatcher { // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. // // Envoy supports program size checking via runtime. 
The runtime keys `re2.max_program_size.error_level` // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or // complexity that a compiled regex can have before an exception is thrown or a warning is // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). // // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented // each time the program size exceeds the warn level threshold. message GoogleRE2 { // This field controls the RE2 "program size" which is a rough estimate of how complex a // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. // // This field is deprecated; regexp validation should be performed on the management server // instead of being done by each individual client. google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } oneof engine_type { option (validate.required) = true; // Google's RE2 regex engine. GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; } // The regex match string. The string must be supported by the configured engine. string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular // expression and a substitution string. message RegexMatchAndSubstitute { // The regular expression used to find portions of a string (hereafter called // the "subject string") that should be replaced. 
When a new string is // produced during the substitution operation, the new string is initially // the same as the subject string, but then all matches in the subject string // are replaced by the substitution string. If replacing all matches isn't // desired, regular expression anchors can be used to ensure a single match, // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. RegexMatcher pattern = 1; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. // Capture groups in the pattern can be referenced in the substitution // string. Note, however, that the syntax for referring to capture groups is // defined by the chosen regular expression engine. Google's `RE2 // `_ regular expression engine uses a // backslash followed by the capture group number to denote a numbered // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers // to capture group 2. string substitution = 2; } ================================================ FILE: api/envoy/type/matcher/string.proto ================================================ syntax = "proto3"; package envoy.type.matcher; import "envoy/type/matcher/regex.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "StringProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: String matcher] // Specifies the way to match a string. // [#next-free-field: 7] message StringMatcher { oneof match_pattern { option (validate.required) = true; // The input string must match exactly the string specified here. 
// // Examples: // // * *abc* only matches the value *abc*. string exact = 1; // The input string must have the prefix specified here. // Note: empty prefix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *abc.xyz* string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty suffix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *xyz.abc* string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. // The regex grammar is defined `here // `_. // // Examples: // // * The regex ``\d{3}`` matches the value *123* // * The regex ``\d{3}`` does not match the value *1234* // * The regex ``\d{3}`` does not match the value *123.456* // // .. attention:: // This field has been deprecated in favor of `safe_regex` as it is not safe for use with // untrusted input in all cases. string regex = 4 [ deprecated = true, (validate.rules).string = {max_bytes: 1024}, (envoy.annotations.disallowed_by_default) = true ]; // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; } // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no // effect for the safe_regex match. // For example, the matcher *data* will match both input string *Data* and *data* if set to true. bool ignore_case = 6; } // Specifies a list of ways to match a string. 
message ListStringMatcher { repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/type/matcher/struct.proto ================================================ syntax = "proto3"; package envoy.type.matcher; import "envoy/type/matcher/value.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "StructProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Struct matcher] // StructMatcher provides a general interface to check if a given value is matched in // google.protobuf.Struct. It uses `path` to retrieve the value // from the struct and then check if it's matched to the specified value. // // For example, for the following Struct: // // .. code-block:: yaml // // fields: // a: // struct_value: // fields: // b: // struct_value: // fields: // c: // string_value: pro // t: // list_value: // values: // - string_value: m // - string_value: n // // The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" // from the Metadata which is matched to the specified prefix match. // // .. code-block:: yaml // // path: // - key: a // - key: b // - key: c // value: // string_match: // prefix: pr // // The following StructMatcher is matched as the code will match one of the string values in the // list at the path [a, t]. // // .. code-block:: yaml // // path: // - key: a // - key: t // value: // list_match: // one_of: // string_match: // exact: m // // An example use of StructMatcher is to match metadata in envoy.v*.core.Node. message StructMatcher { // Specifies the segment in a path to retrieve value from Struct. message PathSegment { oneof segment { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. 
string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; // The StructMatcher is matched if the value retrieved by path is matched to this value. ValueMatcher value = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/type/matcher/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/type/matcher:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/type/matcher/v3/metadata.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v3; import "envoy/type/matcher/v3/value.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metadata matcher] // MetadataMatcher provides a general interface to check if a given value is matched in // :ref:`Metadata `. It uses `filter` and `path` to retrieve the value // from the Metadata and then check if it's matched to the specified value. // // For example, for the following Metadata: // // .. 
code-block:: yaml // // filter_metadata: // envoy.filters.http.rbac: // fields: // a: // struct_value: // fields: // b: // struct_value: // fields: // c: // string_value: pro // t: // list_value: // values: // - string_value: m // - string_value: n // // The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" // from the Metadata which is matched to the specified prefix match. // // .. code-block:: yaml // // filter: envoy.filters.http.rbac // path: // - key: a // - key: b // - key: c // value: // string_match: // prefix: pr // // The following MetadataMatcher is matched as the code will match one of the string values in the // list at the path [a, t]. // // .. code-block:: yaml // // filter: envoy.filters.http.rbac // path: // - key: a // - key: t // value: // list_match: // one_of: // string_match: // exact: m // // An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to // enforce access control based on dynamic metadata in a request. See :ref:`Permission // ` and :ref:`Principal // `. // [#next-major-version: MetadataMatcher should use StructMatcher] message MetadataMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.MetadataMatcher"; // Specifies the segment in a path to retrieve value from Metadata. // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that // if the segment key refers to a list, it has to be the last segment in a path. message PathSegment { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.MetadataMatcher.PathSegment"; oneof segment { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. 
string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; // The MetadataMatcher is matched if the value retrieved by path is matched to this value. ValueMatcher value = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/type/matcher/v3/node.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v3; import "envoy/type/matcher/v3/string.proto"; import "envoy/type/matcher/v3/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "NodeProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Node matcher] // Specifies the way to match a Node. // The match follows AND semantics. message NodeMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.NodeMatcher"; // Specifies match criteria on the node id. StringMatcher node_id = 1; // Specifies match criteria on the node metadata. repeated StructMatcher node_metadatas = 2; } ================================================ FILE: api/envoy/type/matcher/v3/number.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v3; import "envoy/type/v3/range.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "NumberProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Number matcher] // Specifies the way to match a double value. 
message DoubleMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.DoubleMatcher"; oneof match_pattern { option (validate.required) = true; // If specified, the input double value must be in the range specified here. // Note: The range is using half-open interval semantics [start, end). type.v3.DoubleRange range = 1; // If specified, the input double value must be equal to the value specified here. double exact = 2; } } ================================================ FILE: api/envoy/type/matcher/v3/path.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v3; import "envoy/type/matcher/v3/string.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "PathProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Path matcher] // Specifies the way to match a path on HTTP request. message PathMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.PathMatcher"; oneof rule { option (validate.required) = true; // The `path` must match the URL path portion of the :path header. The query and fragment // string (if present) are removed in the URL path portion. // For example, the path */data* will match the *:path* header */data#fragment?param=value*. 
StringMatcher path = 1 [(validate.rules).message = {required: true}]; } } ================================================ FILE: api/envoy/type/matcher/v3/regex.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v3; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "RegexProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Regex matcher] // A regex matcher designed for safety when used with untrusted input. message RegexMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher"; // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. // // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or // complexity that a compiled regex can have before an exception is thrown or a warning is // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). // // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented // each time the program size exceeds the warn level threshold. 
message GoogleRE2 { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatcher.GoogleRE2"; // This field controls the RE2 "program size" which is a rough estimate of how complex a // compiled regex is to evaluate. A regex that has a program size greater than the configured // value will fail to compile. In this case, the configured max program size can be increased // or the regex can be simplified. If not specified, the default is 100. // // This field is deprecated; regexp validation should be performed on the management server // instead of being done by each individual client. google.protobuf.UInt32Value max_program_size = 1 [deprecated = true]; } oneof engine_type { option (validate.required) = true; // Google's RE2 regex engine. GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; } // The regex match string. The string must be supported by the configured engine. string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular // expression and a substitution string. message RegexMatchAndSubstitute { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.RegexMatchAndSubstitute"; // The regular expression used to find portions of a string (hereafter called // the "subject string") that should be replaced. When a new string is // produced during the substitution operation, the new string is initially // the same as the subject string, but then all matches in the subject string // are replaced by the substitution string. If replacing all matches isn't // desired, regular expression anchors can be used to ensure a single match, // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. 
RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. // Capture groups in the pattern can be referenced in the substitution // string. Note, however, that the syntax for referring to capture groups is // defined by the chosen regular expression engine. Google's `RE2 // `_ regular expression engine uses a // backslash followed by the capture group number to denote a numbered // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers // to capture group 2. string substitution = 2; } ================================================ FILE: api/envoy/type/matcher/v3/string.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v3; import "envoy/type/matcher/v3/regex.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "StringProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: String matcher] // Specifies the way to match a string. // [#next-free-field: 8] message StringMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StringMatcher"; reserved 4; reserved "regex"; oneof match_pattern { option (validate.required) = true; // The input string must match exactly the string specified here. // // Examples: // // * *abc* only matches the value *abc*. string exact = 1; // The input string must have the prefix specified here. // Note: empty prefix is not allowed, please use regex instead. 
// // Examples: // // * *abc* matches the value *abc.xyz* string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *xyz.abc* string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; // The input string must have the substring specified here. // Note: empty contains match is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *xyz.abc.def* string contains = 7 [(validate.rules).string = {min_len: 1}]; } // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no // effect for the safe_regex match. // For example, the matcher *data* will match both input string *Data* and *data* if set to true. bool ignore_case = 6; } // Specifies a list of ways to match a string. message ListStringMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ListStringMatcher"; repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/type/matcher/v3/struct.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v3; import "envoy/type/matcher/v3/value.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "StructProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Struct matcher] // StructMatcher provides a general interface to check if a given value is matched in // google.protobuf.Struct. 
It uses `path` to retrieve the value // from the struct and then check if it's matched to the specified value. // // For example, for the following Struct: // // .. code-block:: yaml // // fields: // a: // struct_value: // fields: // b: // struct_value: // fields: // c: // string_value: pro // t: // list_value: // values: // - string_value: m // - string_value: n // // The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" // from the Metadata which is matched to the specified prefix match. // // .. code-block:: yaml // // path: // - key: a // - key: b // - key: c // value: // string_match: // prefix: pr // // The following StructMatcher is matched as the code will match one of the string values in the // list at the path [a, t]. // // .. code-block:: yaml // // path: // - key: a // - key: t // value: // list_match: // one_of: // string_match: // exact: m // // An example use of StructMatcher is to match metadata in envoy.v*.core.Node. message StructMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StructMatcher"; // Specifies the segment in a path to retrieve value from Struct. message PathSegment { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.StructMatcher.PathSegment"; oneof segment { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; // The StructMatcher is matched if the value retrieved by path is matched to this value. 
ValueMatcher value = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/type/matcher/v3/value.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v3; import "envoy/type/matcher/v3/number.proto"; import "envoy/type/matcher/v3/string.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v3"; option java_outer_classname = "ValueProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Value matcher] // Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. // StructValue is not supported and is always not matched. // [#next-free-field: 7] message ValueMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ValueMatcher"; // NullMatch is an empty message to specify a null value. message NullMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ValueMatcher.NullMatch"; } // Specifies how to match a value. oneof match_pattern { option (validate.required) = true; // If specified, a match occurs if and only if the target value is a NullValue. NullMatch null_match = 1; // If specified, a match occurs if and only if the target value is a double value and is // matched to this field. DoubleMatcher double_match = 2; // If specified, a match occurs if and only if the target value is a string value and is // matched to this field. StringMatcher string_match = 3; // If specified, a match occurs if and only if the target value is a bool value and is equal // to this field. bool bool_match = 4; // If specified, value match will be performed based on whether the path is referring to a // valid primitive value in the metadata. 
If the path is referring to a non-primitive value, // the result is always not matched. bool present_match = 5; // If specified, a match occurs if and only if the target value is a list value and // is matched to this field. ListMatcher list_match = 6; } } // Specifies the way to match a list value. message ListMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.ListMatcher"; oneof match_pattern { option (validate.required) = true; // If specified, at least one of the values in the list must match the value specified. ValueMatcher one_of = 1; } } ================================================ FILE: api/envoy/type/matcher/v4alpha/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/annotations:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/v3:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/type/matcher/v4alpha/metadata.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v4alpha; import "envoy/type/matcher/v4alpha/value.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Metadata matcher] // MetadataMatcher provides a general interface to check if a given value is matched in // :ref:`Metadata `. It uses `filter` and `path` to retrieve the value // from the Metadata and then check if it's matched to the specified value. 
// // For example, for the following Metadata: // // .. code-block:: yaml // // filter_metadata: // envoy.filters.http.rbac: // fields: // a: // struct_value: // fields: // b: // struct_value: // fields: // c: // string_value: pro // t: // list_value: // values: // - string_value: m // - string_value: n // // The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" // from the Metadata which is matched to the specified prefix match. // // .. code-block:: yaml // // filter: envoy.filters.http.rbac // path: // - key: a // - key: b // - key: c // value: // string_match: // prefix: pr // // The following MetadataMatcher is matched as the code will match one of the string values in the // list at the path [a, t]. // // .. code-block:: yaml // // filter: envoy.filters.http.rbac // path: // - key: a // - key: t // value: // list_match: // one_of: // string_match: // exact: m // // An example use of MetadataMatcher is specifying additional metadata in envoy.filters.http.rbac to // enforce access control based on dynamic metadata in a request. See :ref:`Permission // ` and :ref:`Principal // `. // [#next-major-version: MetadataMatcher should use StructMatcher] message MetadataMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.MetadataMatcher"; // Specifies the segment in a path to retrieve value from Metadata. // Note: Currently it's not supported to retrieve a value from a list in Metadata. This means that // if the segment key refers to a list, it has to be the last segment in a path. message PathSegment { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.MetadataMatcher.PathSegment"; oneof segment { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The filter name to retrieve the Struct from the Metadata. 
string filter = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; // The MetadataMatcher is matched if the value retrieved by path is matched to this value. ValueMatcher value = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/type/matcher/v4alpha/node.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v4alpha; import "envoy/type/matcher/v4alpha/string.proto"; import "envoy/type/matcher/v4alpha/struct.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; option java_outer_classname = "NodeProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Node matcher] // Specifies the way to match a Node. // The match follows AND semantics. message NodeMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.NodeMatcher"; // Specifies match criteria on the node id. StringMatcher node_id = 1; // Specifies match criteria on the node metadata. 
repeated StructMatcher node_metadatas = 2; } ================================================ FILE: api/envoy/type/matcher/v4alpha/number.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v4alpha; import "envoy/type/v3/range.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; option java_outer_classname = "NumberProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Number matcher] // Specifies the way to match a double value. message DoubleMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.DoubleMatcher"; oneof match_pattern { option (validate.required) = true; // If specified, the input double value must be in the range specified here. // Note: The range is using half-open interval semantics [start, end). v3.DoubleRange range = 1; // If specified, the input double value must be equal to the value specified here. double exact = 2; } } ================================================ FILE: api/envoy/type/matcher/v4alpha/path.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v4alpha; import "envoy/type/matcher/v4alpha/string.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; option java_outer_classname = "PathProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Path matcher] // Specifies the way to match a path on HTTP request. 
message PathMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.PathMatcher"; oneof rule { option (validate.required) = true; // The `path` must match the URL path portion of the :path header. The query and fragment // string (if present) are removed in the URL path portion. // For example, the path */data* will match the *:path* header */data#fragment?param=value*. StringMatcher path = 1 [(validate.rules).message = {required: true}]; } } ================================================ FILE: api/envoy/type/matcher/v4alpha/regex.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v4alpha; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; option java_outer_classname = "RegexProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Regex matcher] // A regex matcher designed for safety when used with untrusted input. message RegexMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher"; // Google's `RE2 `_ regex engine. The regex string must adhere to // the documented `syntax `_. The engine is designed // to complete execution in linear time as well as limit the amount of memory used. // // Envoy supports program size checking via runtime. The runtime keys `re2.max_program_size.error_level` // and `re2.max_program_size.warn_level` can be set to integers as the maximum program size or // complexity that a compiled regex can have before an exception is thrown or a warning is // logged, respectively. `re2.max_program_size.error_level` defaults to 100, and // `re2.max_program_size.warn_level` has no default if unset (will not check/log a warning). 
// // Envoy emits two stats for tracking the program size of regexes: the histogram `re2.program_size`, // which records the program size, and the counter `re2.exceeded_warn_level`, which is incremented // each time the program size exceeds the warn level threshold. message GoogleRE2 { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatcher.GoogleRE2"; reserved 1; reserved "max_program_size"; } oneof engine_type { option (validate.required) = true; // Google's RE2 regex engine. GoogleRE2 google_re2 = 1 [(validate.rules).message = {required: true}]; } // The regex match string. The string must be supported by the configured engine. string regex = 2 [(validate.rules).string = {min_len: 1}]; } // Describes how to match a string and then produce a new string using a regular // expression and a substitution string. message RegexMatchAndSubstitute { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.RegexMatchAndSubstitute"; // The regular expression used to find portions of a string (hereafter called // the "subject string") that should be replaced. When a new string is // produced during the substitution operation, the new string is initially // the same as the subject string, but then all matches in the subject string // are replaced by the substitution string. If replacing all matches isn't // desired, regular expression anchors can be used to ensure a single match, // so as to replace just one occurrence of a pattern. Capture groups can be // used in the pattern to extract portions of the subject string, and then // referenced in the substitution string. RegexMatcher pattern = 1 [(validate.rules).message = {required: true}]; // The string that should be substituted into matching portions of the // subject string during a substitution operation to produce a new string. // Capture groups in the pattern can be referenced in the substitution // string. 
Note, however, that the syntax for referring to capture groups is // defined by the chosen regular expression engine. Google's `RE2 // `_ regular expression engine uses a // backslash followed by the capture group number to denote a numbered // capture group. E.g., ``\1`` refers to capture group 1, and ``\2`` refers // to capture group 2. string substitution = 2; } ================================================ FILE: api/envoy/type/matcher/v4alpha/string.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v4alpha; import "envoy/type/matcher/v4alpha/regex.proto"; import "envoy/annotations/deprecation.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; option java_outer_classname = "StringProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: String matcher] // Specifies the way to match a string. // [#next-free-field: 8] message StringMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.StringMatcher"; reserved 4; reserved "regex"; oneof match_pattern { option (validate.required) = true; // The input string must match exactly the string specified here. // // Examples: // // * *abc* only matches the value *abc*. string exact = 1; // The input string must have the prefix specified here. // Note: empty prefix is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *abc.xyz* string prefix = 2 [(validate.rules).string = {min_len: 1}]; // The input string must have the suffix specified here. // Note: empty prefix is not allowed, please use regex instead. 
// // Examples: // // * *abc* matches the value *xyz.abc* string suffix = 3 [(validate.rules).string = {min_len: 1}]; // The input string must match the regular expression specified here. RegexMatcher safe_regex = 5 [(validate.rules).message = {required: true}]; // The input string must have the substring specified here. // Note: empty contains match is not allowed, please use regex instead. // // Examples: // // * *abc* matches the value *xyz.abc.def* string contains = 7 [(validate.rules).string = {min_len: 1}]; } // If true, indicates the exact/prefix/suffix matching should be case insensitive. This has no // effect for the safe_regex match. // For example, the matcher *data* will match both input string *Data* and *data* if set to true. bool ignore_case = 6; } // Specifies a list of ways to match a string. message ListStringMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ListStringMatcher"; repeated StringMatcher patterns = 1 [(validate.rules).repeated = {min_items: 1}]; } ================================================ FILE: api/envoy/type/matcher/v4alpha/struct.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v4alpha; import "envoy/type/matcher/v4alpha/value.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; option java_outer_classname = "StructProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Struct matcher] // StructMatcher provides a general interface to check if a given value is matched in // google.protobuf.Struct. It uses `path` to retrieve the value // from the struct and then check if it's matched to the specified value. // // For example, for the following Struct: // // .. 
code-block:: yaml // // fields: // a: // struct_value: // fields: // b: // struct_value: // fields: // c: // string_value: pro // t: // list_value: // values: // - string_value: m // - string_value: n // // The following MetadataMatcher is matched as the path [a, b, c] will retrieve a string value "pro" // from the Metadata which is matched to the specified prefix match. // // .. code-block:: yaml // // path: // - key: a // - key: b // - key: c // value: // string_match: // prefix: pr // // The following StructMatcher is matched as the code will match one of the string values in the // list at the path [a, t]. // // .. code-block:: yaml // // path: // - key: a // - key: t // value: // list_match: // one_of: // string_match: // exact: m // // An example use of StructMatcher is to match metadata in envoy.v*.core.Node. message StructMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.StructMatcher"; // Specifies the segment in a path to retrieve value from Struct. message PathSegment { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.StructMatcher.PathSegment"; oneof segment { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The path to retrieve the Value from the Struct. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; // The StructMatcher is matched if the value retrieved by path is matched to this value. 
ValueMatcher value = 3 [(validate.rules).message = {required: true}]; } ================================================ FILE: api/envoy/type/matcher/v4alpha/value.proto ================================================ syntax = "proto3"; package envoy.type.matcher.v4alpha; import "envoy/type/matcher/v4alpha/number.proto"; import "envoy/type/matcher/v4alpha/string.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher.v4alpha"; option java_outer_classname = "ValueProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = NEXT_MAJOR_VERSION_CANDIDATE; // [#protodoc-title: Value matcher] // Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. // StructValue is not supported and is always not matched. // [#next-free-field: 7] message ValueMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ValueMatcher"; // NullMatch is an empty message to specify a null value. message NullMatch { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ValueMatcher.NullMatch"; } // Specifies how to match a value. oneof match_pattern { option (validate.required) = true; // If specified, a match occurs if and only if the target value is a NullValue. NullMatch null_match = 1; // If specified, a match occurs if and only if the target value is a double value and is // matched to this field. DoubleMatcher double_match = 2; // If specified, a match occurs if and only if the target value is a string value and is // matched to this field. StringMatcher string_match = 3; // If specified, a match occurs if and only if the target value is a bool value and is equal // to this field. 
bool bool_match = 4; // If specified, value match will be performed based on whether the path is referring to a // valid primitive value in the metadata. If the path is referring to a non-primitive value, // the result is always not matched. bool present_match = 5; // If specified, a match occurs if and only if the target value is a list value and // is matched to this field. ListMatcher list_match = 6; } } // Specifies the way to match a list value. message ListMatcher { option (udpa.annotations.versioning).previous_message_type = "envoy.type.matcher.v3.ListMatcher"; oneof match_pattern { option (validate.required) = true; // If specified, at least one of the values in the list must match the value specified. ValueMatcher one_of = 1; } } ================================================ FILE: api/envoy/type/matcher/value.proto ================================================ syntax = "proto3"; package envoy.type.matcher; import "envoy/type/matcher/number.proto"; import "envoy/type/matcher/string.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.matcher"; option java_outer_classname = "ValueProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Value matcher] // Specifies the way to match a ProtobufWkt::Value. Primitive values and ListValue are supported. // StructValue is not supported and is always not matched. // [#next-free-field: 7] message ValueMatcher { // NullMatch is an empty message to specify a null value. message NullMatch { } // Specifies how to match a value. oneof match_pattern { option (validate.required) = true; // If specified, a match occurs if and only if the target value is a NullValue. NullMatch null_match = 1; // If specified, a match occurs if and only if the target value is a double value and is // matched to this field. 
DoubleMatcher double_match = 2; // If specified, a match occurs if and only if the target value is a string value and is // matched to this field. StringMatcher string_match = 3; // If specified, a match occurs if and only if the target value is a bool value and is equal // to this field. bool bool_match = 4; // If specified, value match will be performed based on whether the path is referring to a // valid primitive value in the metadata. If the path is referring to a non-primitive value, // the result is always not matched. bool present_match = 5; // If specified, a match occurs if and only if the target value is a list value and // is matched to this field. ListMatcher list_match = 6; } } // Specifies the way to match a list value. message ListMatcher { oneof match_pattern { option (validate.required) = true; // If specified, at least one of the values in the list must match the value specified. ValueMatcher one_of = 1; } } ================================================ FILE: api/envoy/type/metadata/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = ["@com_github_cncf_udpa//udpa/annotations:pkg"], ) ================================================ FILE: api/envoy/type/metadata/v2/metadata.proto ================================================ syntax = "proto3"; package envoy.type.metadata.v2; import "udpa/annotations/migrate.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.metadata.v2"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_migrate).move_to_package = "envoy.type.metadata.v3"; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Metadata] // MetadataKey provides a general interface using `key` and `path` to retrieve value from // :ref:`Metadata `. // // For example, for the following Metadata: // // .. code-block:: yaml // // filter_metadata: // envoy.xxx: // prop: // foo: bar // xyz: // hello: envoy // // The following MetadataKey will retrieve a string value "bar" from the Metadata. // // .. code-block:: yaml // // key: envoy.xxx // path: // - key: prop // - key: foo // message MetadataKey { // Specifies the segment in a path to retrieve value from Metadata. // Currently it is only supported to specify the key, i.e. field name, as one segment of a path. message PathSegment { oneof segment { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. string key = 1 [(validate.rules).string = {min_bytes: 1}]; } } // The key name of Metadata to retrieve the Struct from the metadata. // Typically, it represents a builtin subsystem or custom extension. string key = 1 [(validate.rules).string = {min_bytes: 1}]; // The path to retrieve the Value from the Struct. It can be a prefix or a full path, // e.g. 
``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, // which depends on the particular scenario. // // Note: Due to that only the key type segment is supported, the path can not specify a list // unless the list is the last segment. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; } // Describes what kind of metadata. message MetadataKind { // Represents dynamic metadata associated with the request. message Request { } // Represents metadata from :ref:`the route`. message Route { } // Represents metadata from :ref:`the upstream cluster`. message Cluster { } // Represents metadata from :ref:`the upstream // host`. message Host { } oneof kind { option (validate.required) = true; // Request kind of metadata. Request request = 1; // Route kind of metadata. Route route = 2; // Cluster kind of metadata. Cluster cluster = 3; // Host kind of metadata. Host host = 4; } } ================================================ FILE: api/envoy/type/metadata/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/type/metadata/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/type/metadata/v3/metadata.proto ================================================ syntax = "proto3"; package envoy.type.metadata.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.metadata.v3"; option java_outer_classname = "MetadataProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Metadata] // MetadataKey provides a general interface using `key` and `path` to retrieve value from // :ref:`Metadata `. // // For example, for the following Metadata: // // .. code-block:: yaml // // filter_metadata: // envoy.xxx: // prop: // foo: bar // xyz: // hello: envoy // // The following MetadataKey will retrieve a string value "bar" from the Metadata. // // .. code-block:: yaml // // key: envoy.xxx // path: // - key: prop // - key: foo // message MetadataKey { option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKey"; // Specifies the segment in a path to retrieve value from Metadata. // Currently it is only supported to specify the key, i.e. field name, as one segment of a path. message PathSegment { option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKey.PathSegment"; oneof segment { option (validate.required) = true; // If specified, use the key to retrieve the value in a Struct. string key = 1 [(validate.rules).string = {min_len: 1}]; } } // The key name of Metadata to retrieve the Struct from the metadata. // Typically, it represents a builtin subsystem or custom extension. 
string key = 1 [(validate.rules).string = {min_len: 1}]; // The path to retrieve the Value from the Struct. It can be a prefix or a full path, // e.g. ``[prop, xyz]`` for a struct or ``[prop, foo]`` for a string in the example, // which depends on the particular scenario. // // Note: Due to that only the key type segment is supported, the path can not specify a list // unless the list is the last segment. repeated PathSegment path = 2 [(validate.rules).repeated = {min_items: 1}]; } // Describes what kind of metadata. message MetadataKind { option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKind"; // Represents dynamic metadata associated with the request. message Request { option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKind.Request"; } // Represents metadata from :ref:`the route`. message Route { option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKind.Route"; } // Represents metadata from :ref:`the upstream cluster`. message Cluster { option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKind.Cluster"; } // Represents metadata from :ref:`the upstream // host`. message Host { option (udpa.annotations.versioning).previous_message_type = "envoy.type.metadata.v2.MetadataKind.Host"; } oneof kind { option (validate.required) = true; // Request kind of metadata. Request request = 1; // Route kind of metadata. Route route = 2; // Cluster kind of metadata. Cluster cluster = 3; // Host kind of metadata. 
Host host = 4; } } ================================================ FILE: api/envoy/type/percent.proto ================================================ syntax = "proto3"; package envoy.type; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "PercentProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Percent] // Identifies a percentage, in the range [0.0, 100.0]. message Percent { double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; } // A fractional percentage is used in cases in which for performance reasons performing floating // point to integer conversions during randomness calculations is undesirable. The message includes // both a numerator and denominator that together determine the final fractional value. // // * **Example**: 1/100 = 1%. // * **Example**: 3/10000 = 0.03%. message FractionalPercent { // Fraction percentages support several fixed denominator values. enum DenominatorType { // 100. // // **Example**: 1/100 = 1%. HUNDRED = 0; // 10,000. // // **Example**: 1/10000 = 0.01%. TEN_THOUSAND = 1; // 1,000,000. // // **Example**: 1/1000000 = 0.0001%. MILLION = 2; } // Specifies the numerator. Defaults to 0. uint32 numerator = 1; // Specifies the denominator. If the denominator specified is less than the numerator, the final // fractional percentage is capped at 1 (100%). 
DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/type/range.proto ================================================ syntax = "proto3"; package envoy.type; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "RangeProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Range] // Specifies the int64 start and end of the range using half-open interval semantics [start, // end). message Int64Range { // start of the range (inclusive) int64 start = 1; // end of the range (exclusive) int64 end = 2; } // Specifies the int32 start and end of the range using half-open interval semantics [start, // end). message Int32Range { // start of the range (inclusive) int32 start = 1; // end of the range (exclusive) int32 end = 2; } // Specifies the double start and end of the range using half-open interval semantics [start, // end). message DoubleRange { // start of the range (inclusive) double start = 1; // end of the range (exclusive) double end = 2; } ================================================ FILE: api/envoy/type/semantic_version.proto ================================================ syntax = "proto3"; package envoy.type; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "SemanticVersionProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Semantic Version] // Envoy uses SemVer (https://semver.org/). Major/minor versions indicate // expected behaviors and APIs, the patch version field is used only // for security fixes and can be generally ignored. 
message SemanticVersion { uint32 major_number = 1; uint32 minor_number = 2; uint32 patch = 3; } ================================================ FILE: api/envoy/type/token_bucket.proto ================================================ syntax = "proto3"; package envoy.type; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type"; option java_outer_classname = "TokenBucketProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Token bucket] // Configures a token bucket, typically used for rate limiting. message TokenBucket { // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket // initially contains. uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; // The number of tokens added to the bucket during each fill interval. If not specified, defaults // to a single token. google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; // The fill interval that tokens are added to the bucket. During each fill interval // `tokens_per_fill` are added to the bucket. The bucket will never contain more than // `max_tokens` tokens. google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { required: true gt {} }]; } ================================================ FILE: api/envoy/type/tracing/v2/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/type/metadata/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/type/tracing/v2/custom_tag.proto ================================================ syntax = "proto3"; package envoy.type.tracing.v2; import "envoy/type/metadata/v2/metadata.proto"; import "udpa/annotations/status.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.tracing.v2"; option java_outer_classname = "CustomTagProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = FROZEN; // [#protodoc-title: Custom Tag] // Describes custom tags for the active span. // [#next-free-field: 6] message CustomTag { // Literal type custom tag with static value for the tag value. message Literal { // Static literal value to populate the tag value. string value = 1 [(validate.rules).string = {min_bytes: 1}]; } // Environment type custom tag with environment name and default value. message Environment { // Environment variable name to obtain the value to populate the tag value. string name = 1 [(validate.rules).string = {min_bytes: 1}]; // When the environment variable is not found, // the tag value will be populated with this default value if specified, // otherwise no tag will be populated. string default_value = 2; } // Header type custom tag with header name and default value. message Header { // Header name to obtain the value to populate the tag value. string name = 1 [(validate.rules).string = {min_bytes: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // When the header does not exist, // the tag value will be populated with this default value if specified, // otherwise no tag will be populated. 
string default_value = 2; } // Metadata type custom tag using // :ref:`MetadataKey ` to retrieve the protobuf value // from :ref:`Metadata `, and populate the tag value with // `the canonical JSON `_ // representation of it. message Metadata { // Specify what kind of metadata to obtain tag value from. metadata.v2.MetadataKind kind = 1; // Metadata key to define the path to retrieve the tag value. metadata.v2.MetadataKey metadata_key = 2; // When no valid metadata is found, // the tag value would be populated with this default value if specified, // otherwise no tag would be populated. string default_value = 3; } // Used to populate the tag name. string tag = 1 [(validate.rules).string = {min_bytes: 1}]; // Used to specify what kind of custom tag. oneof type { option (validate.required) = true; // A literal custom tag. Literal literal = 2; // An environment custom tag. Environment environment = 3; // A request header custom tag. Header request_header = 4; // A custom tag to obtain tag value from the metadata. Metadata metadata = 5; } } ================================================ FILE: api/envoy/type/tracing/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v2:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/type/tracing/v3/custom_tag.proto ================================================ syntax = "proto3"; package envoy.type.tracing.v3; import "envoy/type/metadata/v3/metadata.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.tracing.v3"; option java_outer_classname = "CustomTagProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Custom Tag] // Describes custom tags for the active span. // [#next-free-field: 6] message CustomTag { option (udpa.annotations.versioning).previous_message_type = "envoy.type.tracing.v2.CustomTag"; // Literal type custom tag with static value for the tag value. message Literal { option (udpa.annotations.versioning).previous_message_type = "envoy.type.tracing.v2.CustomTag.Literal"; // Static literal value to populate the tag value. string value = 1 [(validate.rules).string = {min_len: 1}]; } // Environment type custom tag with environment name and default value. message Environment { option (udpa.annotations.versioning).previous_message_type = "envoy.type.tracing.v2.CustomTag.Environment"; // Environment variable name to obtain the value to populate the tag value. string name = 1 [(validate.rules).string = {min_len: 1}]; // When the environment variable is not found, // the tag value will be populated with this default value if specified, // otherwise no tag will be populated. string default_value = 2; } // Header type custom tag with header name and default value. 
message Header { option (udpa.annotations.versioning).previous_message_type = "envoy.type.tracing.v2.CustomTag.Header"; // Header name to obtain the value to populate the tag value. string name = 1 [(validate.rules).string = {min_len: 1 well_known_regex: HTTP_HEADER_NAME strict: false}]; // When the header does not exist, // the tag value will be populated with this default value if specified, // otherwise no tag will be populated. string default_value = 2; } // Metadata type custom tag using // :ref:`MetadataKey ` to retrieve the protobuf value // from :ref:`Metadata `, and populate the tag value with // `the canonical JSON `_ // representation of it. message Metadata { option (udpa.annotations.versioning).previous_message_type = "envoy.type.tracing.v2.CustomTag.Metadata"; // Specify what kind of metadata to obtain tag value from. metadata.v3.MetadataKind kind = 1; // Metadata key to define the path to retrieve the tag value. metadata.v3.MetadataKey metadata_key = 2; // When no valid metadata is found, // the tag value would be populated with this default value if specified, // otherwise no tag would be populated. string default_value = 3; } // Used to populate the tag name. string tag = 1 [(validate.rules).string = {min_len: 1}]; // Used to specify what kind of custom tag. oneof type { option (validate.required) = true; // A literal custom tag. Literal literal = 2; // An environment custom tag. Environment environment = 3; // A request header custom tag. Header request_header = 4; // A custom tag to obtain tag value from the metadata. Metadata metadata = 5; } } ================================================ FILE: api/envoy/type/v3/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/proto_sync.py. 
load("@envoy_api//bazel:api_build_system.bzl", "api_proto_package") licenses(["notice"]) # Apache 2 api_proto_package( deps = [ "//envoy/type:pkg", "@com_github_cncf_udpa//udpa/annotations:pkg", ], ) ================================================ FILE: api/envoy/type/v3/hash_policy.proto ================================================ syntax = "proto3"; package envoy.type.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HashPolicyProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Hash Policy] // Specifies the hash policy message HashPolicy { option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy"; // The source IP will be used to compute the hash used by hash-based load balancing // algorithms. message SourceIp { option (udpa.annotations.versioning).previous_message_type = "envoy.type.HashPolicy.SourceIp"; } oneof policy_specifier { option (validate.required) = true; SourceIp source_ip = 1; } } ================================================ FILE: api/envoy/type/v3/http.proto ================================================ syntax = "proto3"; package envoy.type.v3; import "udpa/annotations/status.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HttpProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP] enum CodecClientType { HTTP1 = 0; HTTP2 = 1; // [#not-implemented-hide:] QUIC implementation is not production ready yet. Use this enum with // caution to prevent accidental execution of QUIC code. I.e. `!= HTTP2` is no longer sufficient // to distinguish HTTP1 and HTTP2 traffic. 
HTTP3 = 2; } ================================================ FILE: api/envoy/type/v3/http_status.proto ================================================ syntax = "proto3"; package envoy.type.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "HttpStatusProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: HTTP status codes] // HTTP response codes supported in Envoy. // For more details: https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml enum StatusCode { // Empty - This code not part of the HTTP status code specification, but it is needed for proto // `enum` type. Empty = 0; Continue = 100; OK = 200; Created = 201; Accepted = 202; NonAuthoritativeInformation = 203; NoContent = 204; ResetContent = 205; PartialContent = 206; MultiStatus = 207; AlreadyReported = 208; IMUsed = 226; MultipleChoices = 300; MovedPermanently = 301; Found = 302; SeeOther = 303; NotModified = 304; UseProxy = 305; TemporaryRedirect = 307; PermanentRedirect = 308; BadRequest = 400; Unauthorized = 401; PaymentRequired = 402; Forbidden = 403; NotFound = 404; MethodNotAllowed = 405; NotAcceptable = 406; ProxyAuthenticationRequired = 407; RequestTimeout = 408; Conflict = 409; Gone = 410; LengthRequired = 411; PreconditionFailed = 412; PayloadTooLarge = 413; URITooLong = 414; UnsupportedMediaType = 415; RangeNotSatisfiable = 416; ExpectationFailed = 417; MisdirectedRequest = 421; UnprocessableEntity = 422; Locked = 423; FailedDependency = 424; UpgradeRequired = 426; PreconditionRequired = 428; TooManyRequests = 429; RequestHeaderFieldsTooLarge = 431; InternalServerError = 500; NotImplemented = 501; BadGateway = 502; ServiceUnavailable = 503; GatewayTimeout = 504; HTTPVersionNotSupported = 505; VariantAlsoNegotiates = 506; InsufficientStorage = 
507; LoopDetected = 508; NotExtended = 510; NetworkAuthenticationRequired = 511; } // HTTP status. message HttpStatus { option (udpa.annotations.versioning).previous_message_type = "envoy.type.HttpStatus"; // Supplies HTTP response code. StatusCode code = 1 [(validate.rules).enum = {defined_only: true not_in: 0}]; } ================================================ FILE: api/envoy/type/v3/percent.proto ================================================ syntax = "proto3"; package envoy.type.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "PercentProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Percent] // Identifies a percentage, in the range [0.0, 100.0]. message Percent { option (udpa.annotations.versioning).previous_message_type = "envoy.type.Percent"; double value = 1 [(validate.rules).double = {lte: 100.0 gte: 0.0}]; } // A fractional percentage is used in cases in which for performance reasons performing floating // point to integer conversions during randomness calculations is undesirable. The message includes // both a numerator and denominator that together determine the final fractional value. // // * **Example**: 1/100 = 1%. // * **Example**: 3/10000 = 0.03%. message FractionalPercent { option (udpa.annotations.versioning).previous_message_type = "envoy.type.FractionalPercent"; // Fraction percentages support several fixed denominator values. enum DenominatorType { // 100. // // **Example**: 1/100 = 1%. HUNDRED = 0; // 10,000. // // **Example**: 1/10000 = 0.01%. TEN_THOUSAND = 1; // 1,000,000. // // **Example**: 1/1000000 = 0.0001%. MILLION = 2; } // Specifies the numerator. Defaults to 0. uint32 numerator = 1; // Specifies the denominator. 
If the denominator specified is less than the numerator, the final // fractional percentage is capped at 1 (100%). DenominatorType denominator = 2 [(validate.rules).enum = {defined_only: true}]; } ================================================ FILE: api/envoy/type/v3/range.proto ================================================ syntax = "proto3"; package envoy.type.v3; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "RangeProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Range] // Specifies the int64 start and end of the range using half-open interval semantics [start, // end). message Int64Range { option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int64Range"; // start of the range (inclusive) int64 start = 1; // end of the range (exclusive) int64 end = 2; } // Specifies the int32 start and end of the range using half-open interval semantics [start, // end). message Int32Range { option (udpa.annotations.versioning).previous_message_type = "envoy.type.Int32Range"; // start of the range (inclusive) int32 start = 1; // end of the range (exclusive) int32 end = 2; } // Specifies the double start and end of the range using half-open interval semantics [start, // end). 
message DoubleRange {
  option (udpa.annotations.versioning).previous_message_type = "envoy.type.DoubleRange";

  // start of the range (inclusive)
  double start = 1;

  // end of the range (exclusive)
  double end = 2;
}


================================================
FILE: api/envoy/type/v3/ratelimit_unit.proto
================================================
syntax = "proto3";

package envoy.type.v3;

import "udpa/annotations/status.proto";

option java_package = "io.envoyproxy.envoy.type.v3";
option java_outer_classname = "RatelimitUnitProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Ratelimit Time Unit]

// Identifies the unit of time for rate limiting.
enum RateLimitUnit {
  // The time unit is not known.
  UNKNOWN = 0;

  // The time unit representing a second.
  SECOND = 1;

  // The time unit representing a minute.
  MINUTE = 2;

  // The time unit representing an hour.
  HOUR = 3;

  // The time unit representing a day.
  DAY = 4;
}


================================================
FILE: api/envoy/type/v3/semantic_version.proto
================================================
syntax = "proto3";

package envoy.type.v3;

import "udpa/annotations/status.proto";
import "udpa/annotations/versioning.proto";

option java_package = "io.envoyproxy.envoy.type.v3";
option java_outer_classname = "SemanticVersionProto";
option java_multiple_files = true;
option (udpa.annotations.file_status).package_version_status = ACTIVE;

// [#protodoc-title: Semantic Version]

// Envoy uses SemVer (https://semver.org/). Major/minor versions indicate
// expected behaviors and APIs, the patch version field is used only
// for security fixes and can be generally ignored.
message SemanticVersion { option (udpa.annotations.versioning).previous_message_type = "envoy.type.SemanticVersion"; uint32 major_number = 1; uint32 minor_number = 2; uint32 patch = 3; } ================================================ FILE: api/envoy/type/v3/token_bucket.proto ================================================ syntax = "proto3"; package envoy.type.v3; import "google/protobuf/duration.proto"; import "google/protobuf/wrappers.proto"; import "udpa/annotations/status.proto"; import "udpa/annotations/versioning.proto"; import "validate/validate.proto"; option java_package = "io.envoyproxy.envoy.type.v3"; option java_outer_classname = "TokenBucketProto"; option java_multiple_files = true; option (udpa.annotations.file_status).package_version_status = ACTIVE; // [#protodoc-title: Token bucket] // Configures a token bucket, typically used for rate limiting. message TokenBucket { option (udpa.annotations.versioning).previous_message_type = "envoy.type.TokenBucket"; // The maximum tokens that the bucket can hold. This is also the number of tokens that the bucket // initially contains. uint32 max_tokens = 1 [(validate.rules).uint32 = {gt: 0}]; // The number of tokens added to the bucket during each fill interval. If not specified, defaults // to a single token. google.protobuf.UInt32Value tokens_per_fill = 2 [(validate.rules).uint32 = {gt: 0}]; // The fill interval that tokens are added to the bucket. During each fill interval // `tokens_per_fill` are added to the bucket. The bucket will never contain more than // `max_tokens` tokens. 
google.protobuf.Duration fill_interval = 3 [(validate.rules).duration = { required: true gt {} }]; } ================================================ FILE: api/examples/service_envoy/BUILD ================================================ licenses(["notice"]) # Apache 2 exports_files([ "http_connection_manager.pb", "listeners.pb", ]) ================================================ FILE: api/examples/service_envoy/http_connection_manager.pb ================================================ codec_type: AUTO stat_prefix: "ingress_http" route_config { virtual_hosts { name: "service" domains: "*" routes { match { prefix: "/service" } route { cluster: "local_service" timeout { seconds: 0 } } } } } http_filters { name: "router" } ================================================ FILE: api/examples/service_envoy/listeners.pb ================================================ address { socket_address { protocol: TCP port_value: 80 } } filter_chains { filters { name: "http_connection_manager" } } ================================================ FILE: api/test/build/BUILD ================================================ load("@envoy_api//bazel:api_build_system.bzl", "api_cc_test", "api_go_test") licenses(["notice"]) # Apache 2 api_cc_test( name = "build_test", srcs = ["build_test.cc"], deps = [ "//envoy/api/v2:pkg_cc_proto", "//envoy/service/accesslog/v2:pkg_cc_proto", "//envoy/service/discovery/v2:pkg_cc_proto", "//envoy/service/metrics/v2:pkg_cc_proto", "//envoy/service/ratelimit/v2:pkg_cc_proto", "@com_github_cncf_udpa//udpa/service/orca/v1:pkg_cc_proto", ], ) api_go_test( name = "go_build_test", size = "small", srcs = ["go_build_test.go"], importpath = "go_build_test", deps = [ "//envoy/api/v2:pkg_go_proto", "//envoy/api/v2/auth:pkg_go_proto", "//envoy/config/bootstrap/v2:pkg_go_proto", "//envoy/service/accesslog/v2:pkg_go_proto", "//envoy/service/discovery/v2:pkg_go_proto", "//envoy/service/metrics/v2:pkg_go_proto", "//envoy/service/ratelimit/v2:pkg_go_proto", 
"//envoy/service/trace/v2:pkg_go_proto", ], ) ================================================ FILE: api/test/build/build_test.cc ================================================ // NOLINT(namespace-envoy) #include #include #include "google/protobuf/descriptor.h" // Basic C++ build/link validation for the v2 xDS APIs. int main(int argc, char* argv[]) { const auto methods = { "envoy.api.v2.ClusterDiscoveryService.FetchClusters", "envoy.api.v2.ClusterDiscoveryService.StreamClusters", "envoy.api.v2.EndpointDiscoveryService.FetchEndpoints", "envoy.api.v2.EndpointDiscoveryService.StreamEndpoints", "envoy.api.v2.ListenerDiscoveryService.FetchListeners", "envoy.api.v2.ListenerDiscoveryService.StreamListeners", "envoy.api.v2.RouteDiscoveryService.FetchRoutes", "envoy.api.v2.RouteDiscoveryService.StreamRoutes", "envoy.service.discovery.v2.AggregatedDiscoveryService.StreamAggregatedResources", "envoy.service.discovery.v2.HealthDiscoveryService.FetchHealthCheck", "envoy.service.discovery.v2.HealthDiscoveryService.StreamHealthCheck", "envoy.service.discovery.v2.RuntimeDiscoveryService.FetchRuntime", "envoy.service.discovery.v2.RuntimeDiscoveryService.StreamRuntime", "envoy.service.accesslog.v2.AccessLogService.StreamAccessLogs", "envoy.service.metrics.v2.MetricsService.StreamMetrics", "envoy.service.ratelimit.v2.RateLimitService.ShouldRateLimit", "udpa.service.orca.v1.OpenRcaService.StreamCoreMetrics", }; for (const auto& method : methods) { if (google::protobuf::DescriptorPool::generated_pool()->FindMethodByName(method) == nullptr) { std::cout << "Unable to find method descriptor for " << method << std::endl; exit(EXIT_FAILURE); } } exit(EXIT_SUCCESS); } ================================================ FILE: api/test/build/go_build_test.go ================================================ package go_build_test import ( "testing" _ "github.com/envoyproxy/go-control-plane/envoy/api/v2" _ "github.com/envoyproxy/go-control-plane/envoy/api/v2/auth" _ 
"github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/accesslog/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/metrics/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" _ "github.com/envoyproxy/go-control-plane/envoy/service/trace/v2" ) func TestNoop(t *testing.T) { // Noop test that verifies the successful importation of Envoy V2 API protos } ================================================ FILE: api/test/validate/BUILD ================================================ load("@envoy_api//bazel:api_build_system.bzl", "api_cc_test") licenses(["notice"]) # Apache 2 api_cc_test( name = "pgv_test", srcs = ["pgv_test.cc"], deps = [ "@envoy_api//envoy/api/v2:pkg_cc_proto", "@envoy_api//envoy/api/v2/core:pkg_cc_proto", "@envoy_api//envoy/api/v2/listener:pkg_cc_proto", "@envoy_api//envoy/api/v2/route:pkg_cc_proto", "@envoy_api//envoy/config/bootstrap/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/accesslog/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/buffer/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/fault/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/gzip/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/header_to_metadata/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/health_check/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/ip_tagging/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/lua/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/router/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/squash/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/http/transcoder/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/network/http_connection_manager/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/network/mongo_proxy/v2:pkg_cc_proto", "@envoy_api//envoy/config/filter/network/redis_proxy/v2:pkg_cc_proto", 
"@envoy_api//envoy/config/filter/network/tcp_proxy/v2:pkg_cc_proto", "@envoy_api//envoy/config/health_checker/redis/v2:pkg_cc_proto", ], ) ================================================ FILE: api/test/validate/pgv_test.cc ================================================ // NOLINT(namespace-envoy) #include #include // We don't use all the headers in the test below, but including them anyway as // a cheap way to get some C++ compiler sanity checking. #include "envoy/api/v2/cluster.pb.validate.h" #include "envoy/api/v2/endpoint.pb.validate.h" #include "envoy/api/v2/listener.pb.validate.h" #include "envoy/api/v2/route.pb.validate.h" #include "envoy/api/v2/core/protocol.pb.validate.h" #include "envoy/config/health_checker/redis/v2/redis.pb.validate.h" #include "envoy/config/filter/accesslog/v2/accesslog.pb.validate.h" #include "envoy/config/filter/http/buffer/v2/buffer.pb.validate.h" #include "envoy/config/filter/http/fault/v2/fault.pb.validate.h" #include "envoy/config/filter/http/gzip/v2/gzip.pb.validate.h" #include "envoy/config/filter/http/health_check/v2/health_check.pb.validate.h" #include "envoy/config/filter/http/header_to_metadata/v2/header_to_metadata.pb.validate.h" #include "envoy/config/filter/http/ip_tagging/v2/ip_tagging.pb.validate.h" #include "envoy/config/filter/http/lua/v2/lua.pb.validate.h" #include "envoy/config/filter/http/router/v2/router.pb.validate.h" #include "envoy/config/filter/http/squash/v2/squash.pb.validate.h" #include "envoy/config/filter/http/transcoder/v2/transcoder.pb.validate.h" #include "envoy/config/filter/network/http_connection_manager/v2/http_connection_manager.pb.validate.h" #include "envoy/config/filter/network/mongo_proxy/v2/mongo_proxy.pb.validate.h" #include "envoy/config/filter/network/redis_proxy/v2/redis_proxy.pb.validate.h" #include "envoy/config/filter/network/tcp_proxy/v2/tcp_proxy.pb.validate.h" #include "envoy/api/v2/listener/listener.pb.validate.h" #include "envoy/api/v2/route/route.pb.validate.h" #include 
"envoy/config/bootstrap/v2/bootstrap.pb.validate.h" #include "google/protobuf/text_format.h" template <class Proto> struct TestCase { void run() { std::string err; if (Validate(invalid_message, &err)) { std::cerr << "Unexpected successful validation of invalid message: " << invalid_message.DebugString() << std::endl; exit(EXIT_FAILURE); } if (!Validate(valid_message, &err)) { std::cerr << "Unexpected failed validation of valid message: " << valid_message.DebugString() << ", " << err << std::endl; exit(EXIT_FAILURE); } } Proto& invalid_message; Proto& valid_message; }; // Basic protoc-gen-validate C++ validation header inclusion and Validate calls // from data plane API. int main(int argc, char* argv[]) { envoy::config::bootstrap::v2::Bootstrap invalid_bootstrap; invalid_bootstrap.mutable_static_resources()->add_clusters(); // This is a baseline test of the validation features we care about. It's // probably not worth adding in every filter and field that we want to valid // in the API upfront, but as regressions occur, this is the place to add the // specific case.
const std::string valid_bootstrap_text = R"EOF( node {} cluster_manager {} admin { access_log_path: "/dev/null" address { pipe { path: "/" } } } )EOF"; envoy::config::bootstrap::v2::Bootstrap valid_bootstrap; if (!google::protobuf::TextFormat::ParseFromString(valid_bootstrap_text, &valid_bootstrap)) { std::cerr << "Unable to parse text proto: " << valid_bootstrap_text << std::endl; exit(EXIT_FAILURE); } TestCase<envoy::config::bootstrap::v2::Bootstrap>{invalid_bootstrap, valid_bootstrap}.run(); exit(EXIT_SUCCESS); } ================================================ FILE: api/tools/BUILD ================================================ load("@rules_python//python:defs.bzl", "py_binary", "py_test") licenses(["notice"]) # Apache 2 py_binary( name = "tap2pcap", srcs = ["tap2pcap.py"], licenses = ["notice"], # Apache 2 visibility = ["//visibility:public"], deps = ["//envoy/data/tap/v2alpha:pkg_py_proto"], ) py_test( name = "tap2pcap_test", srcs = ["tap2pcap_test.py"], data = [ "data/tap2pcap_h2_ipv4.pb_text", "data/tap2pcap_h2_ipv4.txt", ], # Don't run this by default, since we don't want to force local dependency on Wireshark/tshark, # will explicitly invoke in CI.
tags = ["manual"], visibility = ["//visibility:public"], deps = [":tap2pcap"], ) py_binary( name = "generate_listeners", srcs = ["generate_listeners.py"], licenses = ["notice"], # Apache 2 visibility = ["//visibility:public"], deps = [ "//envoy/api/v2:pkg_py_proto", "//envoy/config/filter/network/http_connection_manager/v2:pkg_py_proto", ], ) py_test( name = "generate_listeners_test", srcs = ["generate_listeners_test.py"], data = [ "//examples/service_envoy:http_connection_manager.pb", "//examples/service_envoy:listeners.pb", ], visibility = ["//visibility:public"], deps = [":generate_listeners"], ) ================================================ FILE: api/tools/data/tap2pcap_h2_ipv4.pb_text ================================================ socket_buffered_trace { connection { local_address { socket_address { address: "127.0.0.1" port_value: 10000 } } remote_address { socket_address { address: "127.0.0.1" port_value: 53288 } } } events { timestamp { seconds: 1525207293 nanos: 216737962 } read { data: { as_bytes: "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n\000\000\022\004\000\000\000\000\000\000\003\000\000\000d\000\004@\000\000\000\000\002\000\000\000\000\000\000\004\010\000\000\000\000\000?\377\000\001\000\000\036\001\005\000\000\000\001\202\204\206A\212\240\344\035\023\235\t\270\020\000\000z\210%\266P\303\253\266\362\340S\003*/*" } } } events { timestamp { seconds: 1525207293 nanos: 230450657 } write { data: { as_bytes: "\000\000\006\004\000\000\000\000\000\000\004\020\000\000\000\000\000\000\004\001\000\000\000\000\000\000\004\010\000\000\000\000\000\017\377\000\001" } } } events { timestamp { seconds: 1525207293 nanos: 230558250 } read { data: { as_bytes: "\000\000\000\004\001\000\000\000\000" } } } events { timestamp { seconds: 1525207293 nanos: 345386933 } write { data: { as_bytes: "\000\025\223\001\004\000\000\000\001\210@\217\362\264\307<\324\025d\025\0101\352X\325J\177\211\3056\316p\232l\371!\301\000\216\362\264\307<\324\025b\371\254\266\032\222\324\237\377\277 
\023n6\357\320\200\027]o\350@\013\300s\350@\013\302\177\351\326\302\333\241\372u\2612\363\237Ae\260\205\327>\202\313b\003\301\372\013-\211\226\333\372\013-\211\226\335\372\013-\211\267\033\372\013-\211\366[\372\013-\211\367\001\364\026[d\017\271\364\026[d/=\364\026[dL\271\364\026[e\221;\364\026[e\267\031\372\013-\262\353N\375\005\226\331x\014\375\005\226\331x\r}\005\226\331x \372\013-\262\360^\372\013-\262\373\200\372\013-\262\373\354\375\005\226\332\003\356}\005\226\332\013\216\375\005\226\332\013\316\375\005\226\332\020\232\372\013-\264&\203\364\026[hN\273\364\026[i\220;\364\026[i\226\231\372\013-\264\350\003\364\026[i\326\335\372\013-\264\360[\372\013-\264\363m}\005\226\332|-\375\005\226\332}\347~\202\313m\201\227~\202\313m\210\"\372\013-\266\'\032\372\013-\266\'\303\364\026[m\246\335\372\013-\266\330]\372\013-\266\333\356\375\005\226\333m\367\376\202\313m\270\340\276\202\313m\272\340~\202\313m\272\350~\202\313m\272\363?Ae\266\335y\377\240\262\333o8\017\240\262\333o\211\237\240\262\333\200\020>\202\313n\000L\375\005\226\334\003N}\005\226\334\003\257\375\005\226\334\010^\372\013-\270\027\201\364\026[p/\013\350,\266\340\234o\350,\266\343 
o\350,\266\343L\271\364\026[q\246\201\364\026[q\300\207\350,\266\343\201o\350,\266\343\201w\350,\266\343\201\177\350,\266\343\216\003\350,\266\343\217\273\364\026[q\3215\364\026[q\326\235\372\013-\270\353\257\375\005\226\334u\367>\202\313n<\333?Ae\267\036u\377\240\262\333\217\200?Ae\267\037i\377\240\262\333\240e\317\240\262\333\240x_Ae\267B\313\337Ae\267B\320\276\202\313n\205\307\276\202\313n\205\347>\202\313n\210\017\375\005\226\335\020_\372\013-\272\313m\375\005\226\335h\017\375\005\226\335h!\372\013-\272\320\203\364\026[u\247\237\372\013-\272\343\301\372\013-\272\363-}\005\226\335|.\375\005\226\335}\227~\202\313n\276\343?Ae\267_}\237\240\262\333\300q\257\240\262\333\300t?Ae\267\200\353?Ae\267\200\370~\202\313o\004\017}\005\226\336\010C\364\026[x!s\350,\266\360\204\357\320Ym\3416\037\240\262\333\302}\237\240\262\333\314\205\377\240\262\333\314\272\377\320Ym\346\200\017\240\262\333\315\005\257\240\262\333\315\010?Ae\267\232\023\177Ae\267\234\020~\202\313o8\313\337Ae\267\234h_Ae\267\234}\377\240\262\333\317>\017\240\262\333\317\205\377\240\262\333\340\013\377Ae\267\300\313\337Ae\267\304\360\276\204\330\002&\335\372h\002e\240\277\364\320\004\373\301o\351\2402\020\201\377\246\200\310B\343_M\001\226\231d?M\001\226\236h\037M\001\227\237}\237\246\200\320\002\313\177M\001\246B\370>\232\003L\264\370\276\232\003M\274\320~\232\003N\270\360\276\232\003O2\323\277M\001\261:\340~\232\003m\262\343\177M\001\267\032p\037M\001\267\202\323\337M\001\267\336u\317\246\200\340\237y\277\246\200\343\254\270\327\323@q\346\337\027\323@q\360>\317\323@q\360>\337\323@q\367\031k\351\2408\373\214\271\364\320\034}\347\034\372h\016\204\016\203\351\240:\026\\\007\323@t.\274/\246\200\353o\005\317\246\200\353\217\274\347\323@u\360\t\357\246\200\353\342\003\237M\001\327\334m\317\246\200\353\3568\037\246\200\353\3568\377\323@x\014\211\257\246\200\360\033\013\337M\001\3406&\276\232\003\301\003/}4\007\204 
\277\364\320\036d-;\364\320\036e\366\301\364\320\036i\240\263\364\320\036i\2417\364\320\036i\320\273\364\320\036m\327\201\364\320\036q\367]\372h\017>\026\305\364\320\037\000\202/\246\200\370\031\010>\232\003\355\276\020}4\007\334\023\315\3754\007\334\023\316}4\007\334y\306~\232\003\356>\323\337M\001\367B\333?M\001\367Zq\377\246\200\373\257\210_M\001\367_\023\377M\001\367\202\320>\232\003\3574\340>\232\003\3578\373\377M\001\367\235\020~\232\003\357:\'\276\232\010\000!9\364\320@\021\004?M\004\014\205\366~\232\010\031i\340\3754\0204\363b\372h m\226\\\372h m\247\303\364\320@\343-;\364\320@\343\2179\364\320@\343\317\203\351\240\201\326\302\337\323A\003\255\262\017\246\202\007_\013_M\004\017<\006\376\232\010\036y\227~\232\010\037d.}4\020>\323\340\372h!\013\340k\351\240\204/\266\337\323A\010_p?M\004-4\343\277M\004-6\353_M\004-6\373\277M\004-\211\340\3754\020\266\320A\364\320B\333\217\273\364\320B\343 o\351\240\205\306@\357\323A\013\241p?M\004.\205\320\3754\020\272\330Y\372h!u\360\265\364\320B\360\037\027\323A\013\315\264\367\323A\013\340d_M\004/\201\226\276\232\010_\003.}4\020\276\006\201\364\320B\370\035k\351\240\205\366\331g\351\240\205\366\336k\351\240\210\014\262/\246\202 6\373?M\004@q\300}4\021\004.\263\364\320D\020\272\337\323A\020Zi\277\246\202 \264\373?M\004Al/}4\021\005\307\334\372h\"\020\236\027\323A\023.4\037\246\202&\204\323\177M\004M2\370~\232\010\232}\340\3754\0216\323\257}4\0216\363.\3754\0216\373\242\372h\"p@\007\323A\023\202\023\337M\004N:\333_M\004N\201\347>\232\010\235\023L\3754\021:\310\032\372h\"u\2207\364\320D\353A{\351\240\211\326\302\337\323A\023\301\013\277M\004O\010-}4\021< \271\364\320D\363-\265\364\320D\363@\027\323A\023\315\272\017\246\202\'\233y\317\246\202\'\333\020>\232\010\237q\226~\232\010\237u\367>\232\013 
\020\232\372h,\201\227\234\372h,\201\267\001\364\320Y\003n?\364\320Y\010B\327\323Ad.2/\246\202\310]e\337\246\202\310^e\257\246\202\310\200\320~\232\013\"\013\355\3754\026D\320^\372h,\211\246\205\364\320Y\023o9\364\320Y\023\217\275\364\320Y\023\317\275\364\320Yd.\013\351\240\262\313-\277\364\320Ye\240\275\364\320Ye\247\003\364\320Ye\260?\364\320Ye\306\\\372h,\262\343/}4\026Yy\266\276\232\013-2\343?M\005\226\232\023\337M\005\226\233q\237\246\202\313`\023\177M\005\226\300\363\337M\005\226\304\323\377M\005\226\332m\337\246\202\313m8\037\246\202\313m\272\347\323Ae\300>/\246\202\313\201e\277\246\202\313\201m\277\246\202\313\201|_M\005\227Z\013\177M\005\227Zq\257\246\202\313\255\205\237\246\202\313\255\276\357\323Ae\327D\357\323Ae\346\233o\351\240\262\363\341k\351\240\262\363\357\007\351\240\262\373L\271\364\320Z\000\002/\246\202\320\000&~\232\013@\003@\372h-\000M3\364\320Z\003\216\267\364\320Z\003\240k\351\240\264\007Yg\351\240\264\007\333\177\351\240\264\0204\377\323Ah,\262/\246\202\320Yi\317\246\202\320Ze\337\246\202\320[\020~\232\013Am\326\376\232\013Am\347\276\232\013Aq\346~\232\013At\016\3754\026\202\370[\372h-\005\366\331\372h-\t\221=\364\320Z\023\314\265\364\320Z\023\340w\351\240\264\310@\317\323Ai\220\205\237\246\202\323\"u\357\246\202\323,\274\337\323Ai\226\200\377\323Ai\227\033g\351\240\264\313\256\275\364\320Ze\327\205\364\320Ze\327\231\372h-2\370\005\364\320Ze\367\035\372h-4&Y\372h-4\310\233\372h-4\330\235\372h-4\333L\3754\026\232p\002\372h-4\340_\372h-4\353B\372h-6\020?\364\320Zm\247\003\364\320Zm\247\233\372h-6\330\005\364\320Zm\327E\364\320Zm\346\\\372h-8\007^\372h-8\007\201\364\320Zp-9\364\320Zp/3\364\320Zq\246\201\364\320Zq\246\205\364\320Zq\247]\372h-8\343!\372h-8\343\316\3754\026\234}\366\376\232\013N\201\267~\232\013N\264\373\337M\005\247_\000>\232\013N\276!}4\026\236\000]\372h-<\007\003\364\320Zx\016;\364\320ZxL\207\351\240\264\360\234{\351\240\264\363 
k\351\240\264\363\"s\351\240\264\363\"{\351\240\264\363\"\177\351\240\264\363,\213\351\240\264\363O\275\364\320Zy\261\003\351\240\264\363b\027\323Ai\346\335g\351\240\264\363\255=\364\320Zy\347Y\372h-<\373b\372h->\000\273\364\320Z|\r\275\364\320Z|L\277\364\320Z}\226\303\364\320Z}\266\331\372h->\333\340\372h->\333\341\372h->\333\355\3754\026\237m\367\276\232\013O\270\'\376\232\013O\270\363?M\005\247\336\003\377M\005\247\336\013?M\005\247\336d\037M\005\247\337\013?M\005\260\000.\3754\026\300\006C\364\320[\000\037\007\323Al\r\005\257\246\202\330\032l?M\005\260<\007>\232\013`x.}4\026\302\'\034\372h-\204O\267\364\320[\010\237\177\351\240\266\026_\017\323Al.<\357\323Al.\266/\246\202\330]m\357\246\202\330^m\317\246\202\330^q\277\246\202\330^u\277\246\202\330_\013\377M\005\261\001\247\376\232\013b\013N\3754\026\304\333`\372h-\211\327\201\364\320[\023\341o\351\240\266\'\302\357\323AlO\266\017\246\202\333 i\377\246\202\333 q\357\246\202\333\"\000~\232\013l\210B\372h-\262\'^\372h-\262\313B\372h-\262\320\205\364\320[e\2605\364\320[e\301=\364\320[e\306\332\372h-\262\350\\\372h-\262\373b\372h-\262\373n\3754\026\332\000\201\364\320[h\016\213\351\240\266\320\036\017\323Am\246@\367\323Am\246[k\351\240\266\323\355\277\364\320[l,\277\364\320[l/\265\364\320[m\260\267\364\320[m\267\035\372h-\266\333\217\3754\026\333q\366\376\232\013m\272!}4\026\333u\220}4\026\333u\320}4\026\334\003O}4\026\334\003\356\3754\026\334\010\032\372h-\270\0207\364\320[p 
w\351\240\266\340Yw\351\240\266\340^\017\323Am\3014\327\323Am\306\202\337\323Am\307\033s\351\240\266\343\240k\351\240\266\343\240s\351\240\266\343\257\003\351\240\266\343\3173\364\320[t\016\207\351\240\266\350Y\177\351\240\266\353-\013\351\240\266\353-\203\351\240\266\353/\007\351\240\266\353/3\364\320[u\2405\364\320[u\306\336\372h-\272\350Z\372h-\272\353\201\372h-\272\353\356}4\026\335u\367\376\232\013n\274\000\3754\026\335y\227~\232\013n\274\363\277M\005\267_\023\377M\005\267\200\313\177M\005\267\200\343_M\005\267\202\373?M\005\267\231\003\177M\005\267\231\003\377M\005\267\234m\337\246\202\333\316\211\257\246\202\333\3176\327\323Am\347\304\017\246\202\333\355\272\017\246\202\333\356\t\277\246\202\333\3564/\246\202\333\356>\317\323Am\367]\007\323Am\367]g\351\240\266\373\256\273\364\320[}\360\013\351\240\266\373\355\007\351\240\266\373\356\273\364\320\\\000[\177\351\240\270\001\010\037M\005\3002\027>\232\013\200h\017}4\027\000\320_\372h.\001\2413\364\320\\\003N3\364\320\\\003O\213\351\240\270\007\304\317\323Ap\017\211\277\246\202\340\037i\317\246\202\340\037y\357\246\202\340@\007~\232\013\201\013\316\3754\027\002\313o}4\027\002\320Y\372h.\005\247\203\364\320\\\013O7\364\320\\\013O?\364\320\\\013a\027\323Ap-\211\317\246\202\340[i\357\246\202\340[|_M\005\300\266\373\177M\005\300\272\373_M\005\300\274\353\237M\005\300\276\027\276\232\013\201}\240\3754\027\004\006\331\372h.\010\017\003\351\240\270 \270/\246\202\340\204\'>\232\013\202\020\236\372h.\t\226C\364\320\\\023A\007\323ApM6\317\323ApM\204_M\005\3018 \3754\027\004\343\201\372h.\t\307\234\372h.2\007\034\372h.2\007\035\372h.2\026\237\372h.2 
\013\351\240\270\313-9\364\320\\e\226\236\372h.2\313`\372h.2\313\357\3754\027\031l\017}4\027\031}\247\276\232\013\2152\313\337M\005\306\231q\377\246\202\343M:\017\246\202\343M<\037\246\202\343N\211\357\246\202\343N\270\327\323Aq\247\336\017\323Aq\247\337k\351\240\270\330\002\347\323Aq\2604\037\246\202\343`l?M\005\306\302\350~\232\013\215\210\r}4\027\033\023/}4\027\033\023`\372h.6\310\001\364\320\\m\220\263\364\320\\m\221?\364\320\\m\227\305\364\320\\m\247_\372h.6\330\037\372h.6\340\003\364\320\\m\3003\364\320\\m\300\277\364\320\\m\326\305\364\320\\m\327\205\364\320\\m\340?\364\320\\m\347Y\372h.6\363\255\3754\027\033y\327\276\232\013\215\276\006\376\232\013\215\276\007\276\232\013\216\000N\3754\027\034\020\034\372h.8\'\337\372h.8\333\315}4\027\034q\346\276\232\013\216:\373\337M\005\307\036d_M\005\307B\027\276\232\013\216\205\226~\232\013\216\205\240\3754\027\035\013L\3754\027\035\013M\3754\027\035\013\355}4\027\035\023-}4\027\035\023\357\3754\027\035e\326~\232\013\216\262\373?M\005\307Y}\277\246\202\343\255\005\257\246\202\343\255\205\257\246\202\343\256\266\367\323Aq\327\204\327\323Aq\327\231g\351\240\270\353\315\013\351\240\270\360\032w\351\240\270\360\032{\351\240\270\360\033\007\323Aq\3408\017\246\202\343\301y\277\246\202\343\301|?M\005\307\231}\237\246\202\343\315\205\277\246\202\343\315\266\037\246\202\343\315\266/\246\202\343\315\274/\246\202\343\316\201\257\246\202\343\342h\037M\005\307\304\353\277M\005\307\304\363_M\005\307\333}\357\246\202\343\356\001\337\246\202\343\356\270\367\323Aq\367\234o\351\240\270\373\354\273\364\320\\}\366^\372h.\200\r?\364\320]\000\034\177\351\240\272\000\264\017\246\202\350\002\333\277M\005\3204\323?M\005\320<\340~\232\013\240y\327\376\232\013\240|M\3754\027@\373\254\3758\330D\343\355}8\330D\360\001\364\343ad\r\267\364\343ae\267\301\364\343ae\301\007\351\346@\006\202\317\323\314\200\r<\327\323\314\200\r>\327\323\314\200\r>\357\323\314\200\r\270\347\320@ \000\0173\364\020\010\001}\267>\202\001\000@|\037A\000\200 \266\317\320@ 
\010M\013\350 \020\004\310\034\372\010\004\0012\310>\202\001\000M4\337\320@ \t\246\335\372\010\004\0016\313\237A\000\200&\332g\350 \020\004\333\217\375\004\002\000\233u\317\240\200@\023n\275\364\020\010\002p@\372\010\004\0018\'\276\202\001\000N2\357\320@ \t\306_\372\010\004\0018\333\177A\000\200\'\034w\350 \020\004\343\256\375\004\002\000\234u\357\240\262\330\000\000\273\364\026\336\003\256\270\327\320\\\020\000\026\\\372\013\202\000\002\353\337Ap@\000\204\357\320_\000\000\'\236\372\013\340\000\032\023?A|\000\003Bw\350/\200\001}\367\376\202\370\000&\332{\350/\200\002y\340}\005\360\001\226D\037\240\276\0002\310\231\372\013\340\003,\211\277\240\276\0002\310\234\372\013\340\003,\211\357\240\276\0002\310\237\372\013\340\003,\262\017\240\276\0002\320\035\372\013\340\003-\001\357\240\276\0002\320\037\372\013\340\003L\201\377\240\276\0004\330\\\372\013\340\003M\205\337\240\276\0004\330^\372\013\340\003M\205\377\240\276\0004\330\201\364\027\300\006\233\020~\202\370\000\323\317\213\350/\200\r>\000\375\005\360\001\261\001\277\240\276\0006 9\364\027\300\006\304\026\276\202\370\000\330\202\337\320_\000\033\020]\372\013\340\003b\013\337A|\000lA\177\350/\200\r\270\343\177A|\000m\307E\364\027\300\006\335e\317\240\276\0006\353M}4\026B\350[oa\226\337i~\224\000T\320?J\010\001yA\002\343A\270\313*b\321\277d\002-1X\215\256\303w\032K\364\245#\362\260\346,\000_\226I|\245\211\323M\037j\022q\330\202\246\014\233\265,\363\315\276\260\177@\230\362\264\307<\324\025i\245*\321\214\235KT\213X^\326\225\tX\325J\177\224)\244\202)/\237\225\203\361\203\261\223\026\301\372\232\274M_\361@\224\362\264\307<\324\025i\274!h\315P\354\364\267r\330\203\036\257\207\013\355\005\246\\m\357@\003p3p\257\275\256\017\347|\346B\206B\225\035*\rMl\353R\263\320bz\376\024\334R\2512\344;\025\263\\\345\242\265%=\212R{\n\241\252\224\353\377?@\236\362\264\307<\324\025i\245*\304\266\313\013RV\260\275\255*\022\261\016\204\255-\207\245i\274#\204\013K\264\017@\217\362\264\307<\324\025i\006\221\255\334\266 
\307\253\207\013\355>\333\302m\277v\204-]\317\353@\217\362\264\307<\324\025j\212\232OR\324\0162\321\240b:\220\307k\030\214\366L\307k\030\216\24417\204-]\207\221\211\274 +\240\266w+\016\274\017@\214\362\267\224!j\354:JD\230\365\177\212\017\332\224\236B\301\035\007\'_@\213\362\264\266\016\222\254z\322c\324\217\211\335\016\214\032\266\344\305\223O@\223\362\264\307<\324\025i\245*\326\027\265\245BVM\203!\177\303\031)\350\027\2564\323?]\254\242\240\267q\367\231k\351\210\352C\035\254b3\3313\035\254b:\220\304\336\020\265v\036F&\360\200\256\202\331\334\254:\360>\273YEAn\343\3572\327\320\311O@\275q\246\231@\217\362\264\307<\324\025i\221Dk \266w1\013\003web\017(\300\016\270\262\303\266\001\000/,\006\326\000V\020>\324/\232\315aQ\006\371\355\372Q\220\255\240~\226\020\002\362\202\005\306\203q\226T\305\243\177\332\225\2153\300\307\332\222\036\221\232\250\027\230\347\232\202\256C\323\017(\377\'\323\222\374\001\023\360\037\036L\272\274\305D\276\254\237/\237yd\333\370\224\003\036\275\035w\352\247N\276\306j\251\346\240\277\226\001\313\315\343[\266I\315E\355\274\031\330W,\246\350*\375\351\230\217\342\277\336\034ZC\2435\323BO\323\311\023fK\306\316\314m;\0179\331\313\223\006\216\267\253\353\217e\373\037\305\2012\343\272/\034)\333]\267r\375\250_5\232\302\242\016E\223\351FB\266\242%a\000/( \\h7\031eLZ7\375\251X\323<\014}\251!\351\031\252\201y\216y\250*\344=?jcJk\325U\036\277@\215\362\264\307<\324\025h\306N\245\252D\177\320b:\220\307k\030\214\366L\307k\030\216\24417\204-]\207\221\211\274 +\240\266w+\016\274\017\256\326QP[\270\373\314\265\364\304u!\216\3261\031\354\231\216\3261\035Hbo\010Z\273\017#\023x@W\232R\260\363\237C%=\002\365\306\232g@\222\362\264\307<\324\025i\016\205\220[;\230\205Y6\014\205\207\244f\252\344\347\244\277@\214\362\264\307<\324\025d\026\316\346!\177\207\361\343\307\324\347\244\277@\234\362\264\307<\324\025i\245*\326\025\025\236\244\025b\036B\255!R3P\205\223`\310_\250\260\250\254\365 
\261\020\362\026$\0251G\352(\306N\245\252O\253\n\212\317R\013\021\017!bAS\024~\242\214d\352Z\244@\236\362\264\307<\324\025i\245*\326\025\025\236\244\025h\317\'\245\223`\352D\247\262\221\244\307\251\037\215\232\332\275\232\272\313\'\321\'\266\256\245\223@\222\362\264\307<\324\025dNZ(\224\310\235d$i\265\037\207\275\010&\273\202\037_@\205\035\tY\035\311\354\237\264\037\315\306\232g\371\373R\221\300&\337\020\000\017\265;Zb@\330Y\003-2\317\332\235\2551 l,\201\226\231\027\332\235\2551 l,\201\226\231\017\332\235\2551 l,\201\226Y\177\355N\326\230\2206\026@\313,\267\365\332\323\022\017\346\343M3\374\375\251H\340\023o\210\000\007\332\235\340\376Zg\351\241}4\037\246_\372e\277\347@\223\362\264\307<\324\025i\245*\310-\235\314B\254\233\006B\377\207\361\343\307\324\347\244\277R\203\250\365\027{\213\204\204-i[\005D<\206\252o@\225\362\261j\356\177K[Z\023aGJ\310-\235\314B\254\223R_\202\010Z" } } } events { timestamp { seconds: 1525207293 nanos: 346744029 } write { data: { as_bytes: "\000\035V\000\000\000\000\000\001Google

\"Google\"

 

Advanced searchLanguage tools

© 2018 - Privacy - Terms

\000\000\000\000\001\000\000\000\001" } } } } ================================================ FILE: api/tools/data/tap2pcap_h2_ipv4.txt ================================================ 1 0.000000 127.0.0.1 → 127.0.0.1 HTTP2 157 Magic, SETTINGS[0], WINDOW_UPDATE[0], HEADERS[1]: GET / 2 0.013713 127.0.0.1 → 127.0.0.1 HTTP2 91 SETTINGS[0], SETTINGS[0], WINDOW_UPDATE[0] 3 0.013821 127.0.0.1 → 127.0.0.1 HTTP2 63 SETTINGS[0] 4 0.128649 127.0.0.1 → 127.0.0.1 HTTP2 5586 HEADERS[1]: 200 OK 5 0.130007 127.0.0.1 → 127.0.0.1 HTTP2 7573 DATA[1] 6 0.131045 127.0.0.1 → 127.0.0.1 HTTP2 3152 DATA[1], DATA[1] (text/html) ================================================ FILE: api/tools/generate_listeners.py ================================================ # Map from listeners proto, with holes where filter config fragments should go, and # a list of filter config fragment protos, to a final listeners.pb with the # config fragments converted to the opaque Struct representation. import sys # Some evil hack to deal with the fact that Bazel puts both google/api and # google/protobuf roots in the sys.path, and Python gets confused, e.g. it # thinks that there is no api package if it encounters the google/protobuf root # in sys.path first. from pkgutil import extend_path import google google.__path__ = extend_path(google.__path__, google.__name__) from google.protobuf import json_format from google.protobuf import struct_pb2 from google.protobuf import text_format from envoy.api.v2 import lds_pb2 from envoy.config.filter.network.http_connection_manager.v2 import http_connection_manager_pb2 # Convert an arbitrary proto object to its Struct proto representation. def ProtoToStruct(proto): json_rep = json_format.MessageToJson(proto) parsed_msg = struct_pb2.Struct() json_format.Parse(json_rep, parsed_msg) return parsed_msg # Parse a proto from the filesystem. def ParseProto(path, filter_name): # We only know about some filter config protos ahead of time. 
KNOWN_FILTERS = { 'http_connection_manager': lambda: http_connection_manager_pb2.HttpConnectionManager() } filter_config = KNOWN_FILTERS[filter_name]() with open(path, 'r') as f: text_format.Merge(f.read(), filter_config) return filter_config def GenerateListeners(listeners_pb_path, output_pb_path, output_json_path, fragments): listener = lds_pb2.Listener() with open(listeners_pb_path, 'r') as f: text_format.Merge(f.read(), listener) for filter_chain in listener.filter_chains: for f in filter_chain.filters: f.config.CopyFrom(ProtoToStruct(ParseProto(next(fragments), f.name))) with open(output_pb_path, 'w') as f: f.write(str(listener)) with open(output_json_path, 'w') as f: f.write(json_format.MessageToJson(listener)) if __name__ == '__main__': if len(sys.argv) < 4: print('Usage: %s ') % sys.argv[0] sys.exit(1) GenerateListeners(sys.argv[1], sys.argv[2], sys.argv[3], iter(sys.argv[4:])) ================================================ FILE: api/tools/generate_listeners_test.py ================================================ """Tests for generate_listeners.""" import os import generate_listeners if __name__ == "__main__": srcdir = os.path.join(os.getenv("TEST_SRCDIR"), 'envoy_api_canonical') generate_listeners.GenerateListeners( os.path.join(srcdir, "examples/service_envoy/listeners.pb"), "/dev/stdout", "/dev/stdout", iter([os.path.join(srcdir, "examples/service_envoy/http_connection_manager.pb")])) ================================================ FILE: api/tools/tap2pcap.py ================================================ """Tool to convert Envoy tap trace format to PCAP. Uses od and text2pcap (part of Wireshark) utilities to translate the Envoy tap trace proto format to a PCAP file suitable for consuming in Wireshark and other tools in the PCAP ecosystem. The TCP stream in the output PCAP is synthesized based on the known IP/port/timestamps that Envoy produces in its tap files; it is not a literal wire tap. 
Usage: bazel run @envoy_api_canonical//tools:tap2pcap Known issues: - IPv6 PCAP generation has malformed TCP packets. This appears to be a text2pcap issue. TODO(htuch): - Figure out IPv6 PCAP issue above, or file a bug once the root cause is clear. """ from __future__ import print_function import datetime import io import socket import subprocess as sp import sys import time from google.protobuf import text_format from envoy.data.tap.v2alpha import wrapper_pb2 def DumpEvent(direction, timestamp, data): dump = io.StringIO() dump.write('%s\n' % direction) # Adjust to local timezone adjusted_dt = timestamp.ToDatetime() - datetime.timedelta(seconds=time.altzone) dump.write('%s\n' % adjusted_dt) od = sp.Popen(['od', '-Ax', '-tx1', '-v'], stdout=sp.PIPE, stdin=sp.PIPE, stderr=sp.PIPE) packet_dump = od.communicate(data)[0] dump.write(packet_dump.decode()) return dump.getvalue() def Tap2Pcap(tap_path, pcap_path): wrapper = wrapper_pb2.TraceWrapper() if tap_path.endswith('.pb_text'): with open(tap_path, 'r') as f: text_format.Merge(f.read(), wrapper) else: with open(tap_path, 'r') as f: wrapper.ParseFromString(f.read()) trace = wrapper.socket_buffered_trace local_address = trace.connection.local_address.socket_address.address local_port = trace.connection.local_address.socket_address.port_value remote_address = trace.connection.remote_address.socket_address.address remote_port = trace.connection.remote_address.socket_address.port_value dumps = [] for event in trace.events: if event.HasField('read'): dumps.append(DumpEvent('I', event.timestamp, event.read.data.as_bytes)) elif event.HasField('write'): dumps.append(DumpEvent('O', event.timestamp, event.write.data.as_bytes)) ipv6 = False try: socket.inet_pton(socket.AF_INET6, local_address) ipv6 = True except socket.error: pass text2pcap_args = [ 'text2pcap', '-D', '-t', '%Y-%m-%d %H:%M:%S.', '-6' if ipv6 else '-4', '%s,%s' % (remote_address, local_address), '-T', '%d,%d' % (remote_port, local_port), '-', pcap_path ] text2pcap 
= sp.Popen(text2pcap_args, stdout=sp.PIPE, stdin=sp.PIPE) text2pcap.communicate('\n'.join(dumps).encode()) if __name__ == '__main__': if len(sys.argv) != 3: print('Usage: %s ' % sys.argv[0]) sys.exit(1) Tap2Pcap(sys.argv[1], sys.argv[2]) ================================================ FILE: api/tools/tap2pcap_test.py ================================================ """Tests for tap2pcap.""" from __future__ import print_function import os import subprocess as sp import sys import tap2pcap # Validate that the tapped trace when run through tap2cap | tshark matches # a golden output file for the tshark dump. Since we run tap2pcap in a # subshell with a limited environment, the inferred time zone should be UTC. if __name__ == '__main__': srcdir = os.path.join(os.getenv('TEST_SRCDIR'), 'envoy_api_canonical') tap_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.pb_text') expected_path = os.path.join(srcdir, 'tools/data/tap2pcap_h2_ipv4.txt') pcap_path = os.path.join(os.getenv('TEST_TMPDIR'), 'generated.pcap') tap2pcap.Tap2Pcap(tap_path, pcap_path) actual_output = sp.check_output(['tshark', '-r', pcap_path, '-d', 'tcp.port==10000,http2', '-P']) with open(expected_path, 'rb') as f: expected_output = f.read() if actual_output != expected_output: print('Mismatch') print('Expected: %s' % expected_output) print('Actual: %s' % actual_output) sys.exit(1) ================================================ FILE: api/versioning/BUILD ================================================ # DO NOT EDIT. This file is generated by tools/proto_format/active_protos_gen.py. load("@rules_proto//proto:defs.bzl", "proto_library") licenses(["notice"]) # Apache 2 # This tracks active development versions of protos. 
proto_library( name = "active_protos", visibility = ["//visibility:public"], deps = [ "//envoy/admin/v3:pkg", "//envoy/config/accesslog/v3:pkg", "//envoy/config/bootstrap/v3:pkg", "//envoy/config/cluster/v3:pkg", "//envoy/config/common/matcher/v3:pkg", "//envoy/config/core/v3:pkg", "//envoy/config/endpoint/v3:pkg", "//envoy/config/filter/thrift/router/v2alpha1:pkg", "//envoy/config/grpc_credential/v3:pkg", "//envoy/config/health_checker/redis/v2:pkg", "//envoy/config/listener/v3:pkg", "//envoy/config/metrics/v3:pkg", "//envoy/config/overload/v3:pkg", "//envoy/config/ratelimit/v3:pkg", "//envoy/config/rbac/v3:pkg", "//envoy/config/resource_monitor/fixed_heap/v2alpha:pkg", "//envoy/config/resource_monitor/injected_resource/v2alpha:pkg", "//envoy/config/retry/omit_canary_hosts/v2:pkg", "//envoy/config/retry/previous_hosts/v2:pkg", "//envoy/config/route/v3:pkg", "//envoy/config/tap/v3:pkg", "//envoy/config/trace/v3:pkg", "//envoy/data/accesslog/v3:pkg", "//envoy/data/cluster/v3:pkg", "//envoy/data/core/v3:pkg", "//envoy/data/dns/v3:pkg", "//envoy/data/tap/v3:pkg", "//envoy/extensions/access_loggers/file/v3:pkg", "//envoy/extensions/access_loggers/grpc/v3:pkg", "//envoy/extensions/access_loggers/wasm/v3:pkg", "//envoy/extensions/clusters/aggregate/v3:pkg", "//envoy/extensions/clusters/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/clusters/redis/v3:pkg", "//envoy/extensions/common/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/common/ratelimit/v3:pkg", "//envoy/extensions/common/tap/v3:pkg", "//envoy/extensions/compression/gzip/compressor/v3:pkg", "//envoy/extensions/compression/gzip/decompressor/v3:pkg", "//envoy/extensions/filters/common/fault/v3:pkg", "//envoy/extensions/filters/http/adaptive_concurrency/v3:pkg", "//envoy/extensions/filters/http/admission_control/v3alpha:pkg", "//envoy/extensions/filters/http/aws_lambda/v3:pkg", "//envoy/extensions/filters/http/aws_request_signing/v3:pkg", "//envoy/extensions/filters/http/buffer/v3:pkg", 
"//envoy/extensions/filters/http/cache/v3alpha:pkg", "//envoy/extensions/filters/http/cdn_loop/v3alpha:pkg", "//envoy/extensions/filters/http/compressor/v3:pkg", "//envoy/extensions/filters/http/cors/v3:pkg", "//envoy/extensions/filters/http/csrf/v3:pkg", "//envoy/extensions/filters/http/decompressor/v3:pkg", "//envoy/extensions/filters/http/dynamic_forward_proxy/v3:pkg", "//envoy/extensions/filters/http/dynamo/v3:pkg", "//envoy/extensions/filters/http/ext_authz/v3:pkg", "//envoy/extensions/filters/http/fault/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_bridge/v3:pkg", "//envoy/extensions/filters/http/grpc_http1_reverse_bridge/v3:pkg", "//envoy/extensions/filters/http/grpc_json_transcoder/v3:pkg", "//envoy/extensions/filters/http/grpc_stats/v3:pkg", "//envoy/extensions/filters/http/grpc_web/v3:pkg", "//envoy/extensions/filters/http/gzip/v3:pkg", "//envoy/extensions/filters/http/header_to_metadata/v3:pkg", "//envoy/extensions/filters/http/health_check/v3:pkg", "//envoy/extensions/filters/http/ip_tagging/v3:pkg", "//envoy/extensions/filters/http/jwt_authn/v3:pkg", "//envoy/extensions/filters/http/local_ratelimit/v3:pkg", "//envoy/extensions/filters/http/lua/v3:pkg", "//envoy/extensions/filters/http/oauth2/v3alpha:pkg", "//envoy/extensions/filters/http/on_demand/v3:pkg", "//envoy/extensions/filters/http/original_src/v3:pkg", "//envoy/extensions/filters/http/ratelimit/v3:pkg", "//envoy/extensions/filters/http/rbac/v3:pkg", "//envoy/extensions/filters/http/router/v3:pkg", "//envoy/extensions/filters/http/squash/v3:pkg", "//envoy/extensions/filters/http/tap/v3:pkg", "//envoy/extensions/filters/http/wasm/v3:pkg", "//envoy/extensions/filters/listener/http_inspector/v3:pkg", "//envoy/extensions/filters/listener/original_dst/v3:pkg", "//envoy/extensions/filters/listener/original_src/v3:pkg", "//envoy/extensions/filters/listener/proxy_protocol/v3:pkg", "//envoy/extensions/filters/listener/tls_inspector/v3:pkg", 
"//envoy/extensions/filters/network/client_ssl_auth/v3:pkg", "//envoy/extensions/filters/network/direct_response/v3:pkg", "//envoy/extensions/filters/network/dubbo_proxy/router/v3:pkg", "//envoy/extensions/filters/network/dubbo_proxy/v3:pkg", "//envoy/extensions/filters/network/echo/v3:pkg", "//envoy/extensions/filters/network/ext_authz/v3:pkg", "//envoy/extensions/filters/network/http_connection_manager/v3:pkg", "//envoy/extensions/filters/network/kafka_broker/v3:pkg", "//envoy/extensions/filters/network/local_ratelimit/v3:pkg", "//envoy/extensions/filters/network/mongo_proxy/v3:pkg", "//envoy/extensions/filters/network/mysql_proxy/v3:pkg", "//envoy/extensions/filters/network/postgres_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/ratelimit/v3:pkg", "//envoy/extensions/filters/network/rbac/v3:pkg", "//envoy/extensions/filters/network/redis_proxy/v3:pkg", "//envoy/extensions/filters/network/rocketmq_proxy/v3:pkg", "//envoy/extensions/filters/network/sni_cluster/v3:pkg", "//envoy/extensions/filters/network/sni_dynamic_forward_proxy/v3alpha:pkg", "//envoy/extensions/filters/network/tcp_proxy/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/filters/ratelimit/v3:pkg", "//envoy/extensions/filters/network/thrift_proxy/v3:pkg", "//envoy/extensions/filters/network/wasm/v3:pkg", "//envoy/extensions/filters/network/zookeeper_proxy/v3:pkg", "//envoy/extensions/filters/udp/dns_filter/v3alpha:pkg", "//envoy/extensions/filters/udp/udp_proxy/v3:pkg", "//envoy/extensions/internal_redirect/allow_listed_routes/v3:pkg", "//envoy/extensions/internal_redirect/previous_routes/v3:pkg", "//envoy/extensions/internal_redirect/safe_cross_scheme/v3:pkg", "//envoy/extensions/network/socket_interface/v3:pkg", "//envoy/extensions/retry/host/omit_host_metadata/v3:pkg", "//envoy/extensions/retry/priority/previous_priorities/v3:pkg", "//envoy/extensions/stat_sinks/wasm/v3:pkg", "//envoy/extensions/transport_sockets/alts/v3:pkg", 
"//envoy/extensions/transport_sockets/proxy_protocol/v3:pkg", "//envoy/extensions/transport_sockets/quic/v3:pkg", "//envoy/extensions/transport_sockets/raw_buffer/v3:pkg", "//envoy/extensions/transport_sockets/tap/v3:pkg", "//envoy/extensions/transport_sockets/tls/v3:pkg", "//envoy/extensions/upstreams/http/generic/v3:pkg", "//envoy/extensions/upstreams/http/http/v3:pkg", "//envoy/extensions/upstreams/http/tcp/v3:pkg", "//envoy/extensions/wasm/v3:pkg", "//envoy/extensions/watchdog/abort_action/v3alpha:pkg", "//envoy/extensions/watchdog/profile_action/v3alpha:pkg", "//envoy/service/accesslog/v3:pkg", "//envoy/service/auth/v3:pkg", "//envoy/service/cluster/v3:pkg", "//envoy/service/discovery/v3:pkg", "//envoy/service/endpoint/v3:pkg", "//envoy/service/event_reporting/v3:pkg", "//envoy/service/extension/v3:pkg", "//envoy/service/health/v3:pkg", "//envoy/service/listener/v3:pkg", "//envoy/service/load_stats/v3:pkg", "//envoy/service/metrics/v3:pkg", "//envoy/service/ratelimit/v3:pkg", "//envoy/service/route/v3:pkg", "//envoy/service/runtime/v3:pkg", "//envoy/service/secret/v3:pkg", "//envoy/service/status/v3:pkg", "//envoy/service/tap/v3:pkg", "//envoy/service/trace/v3:pkg", "//envoy/type/matcher/v3:pkg", "//envoy/type/metadata/v3:pkg", "//envoy/type/tracing/v3:pkg", "//envoy/type/v3:pkg", ], ) # This tracks frozen versions of protos. 
proto_library( name = "frozen_protos", visibility = ["//visibility:public"], deps = [ "//envoy/admin/v2alpha:pkg", "//envoy/api/v2:pkg", "//envoy/api/v2/auth:pkg", "//envoy/api/v2/cluster:pkg", "//envoy/api/v2/core:pkg", "//envoy/api/v2/endpoint:pkg", "//envoy/api/v2/listener:pkg", "//envoy/api/v2/ratelimit:pkg", "//envoy/api/v2/route:pkg", "//envoy/config/accesslog/v2:pkg", "//envoy/config/bootstrap/v2:pkg", "//envoy/config/cluster/aggregate/v2alpha:pkg", "//envoy/config/cluster/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/cluster/redis:pkg", "//envoy/config/common/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/common/tap/v2alpha:pkg", "//envoy/config/filter/accesslog/v2:pkg", "//envoy/config/filter/dubbo/router/v2alpha1:pkg", "//envoy/config/filter/fault/v2:pkg", "//envoy/config/filter/http/adaptive_concurrency/v2alpha:pkg", "//envoy/config/filter/http/aws_lambda/v2alpha:pkg", "//envoy/config/filter/http/aws_request_signing/v2alpha:pkg", "//envoy/config/filter/http/buffer/v2:pkg", "//envoy/config/filter/http/cache/v2alpha:pkg", "//envoy/config/filter/http/compressor/v2:pkg", "//envoy/config/filter/http/cors/v2:pkg", "//envoy/config/filter/http/csrf/v2:pkg", "//envoy/config/filter/http/dynamic_forward_proxy/v2alpha:pkg", "//envoy/config/filter/http/dynamo/v2:pkg", "//envoy/config/filter/http/ext_authz/v2:pkg", "//envoy/config/filter/http/fault/v2:pkg", "//envoy/config/filter/http/grpc_http1_bridge/v2:pkg", "//envoy/config/filter/http/grpc_http1_reverse_bridge/v2alpha1:pkg", "//envoy/config/filter/http/grpc_stats/v2alpha:pkg", "//envoy/config/filter/http/grpc_web/v2:pkg", "//envoy/config/filter/http/gzip/v2:pkg", "//envoy/config/filter/http/header_to_metadata/v2:pkg", "//envoy/config/filter/http/health_check/v2:pkg", "//envoy/config/filter/http/ip_tagging/v2:pkg", "//envoy/config/filter/http/jwt_authn/v2alpha:pkg", "//envoy/config/filter/http/lua/v2:pkg", "//envoy/config/filter/http/on_demand/v2:pkg", 
"//envoy/config/filter/http/original_src/v2alpha1:pkg", "//envoy/config/filter/http/rate_limit/v2:pkg", "//envoy/config/filter/http/rbac/v2:pkg", "//envoy/config/filter/http/router/v2:pkg", "//envoy/config/filter/http/squash/v2:pkg", "//envoy/config/filter/http/tap/v2alpha:pkg", "//envoy/config/filter/http/transcoder/v2:pkg", "//envoy/config/filter/listener/http_inspector/v2:pkg", "//envoy/config/filter/listener/original_dst/v2:pkg", "//envoy/config/filter/listener/original_src/v2alpha1:pkg", "//envoy/config/filter/listener/proxy_protocol/v2:pkg", "//envoy/config/filter/listener/tls_inspector/v2:pkg", "//envoy/config/filter/network/client_ssl_auth/v2:pkg", "//envoy/config/filter/network/direct_response/v2:pkg", "//envoy/config/filter/network/dubbo_proxy/v2alpha1:pkg", "//envoy/config/filter/network/echo/v2:pkg", "//envoy/config/filter/network/ext_authz/v2:pkg", "//envoy/config/filter/network/http_connection_manager/v2:pkg", "//envoy/config/filter/network/kafka_broker/v2alpha1:pkg", "//envoy/config/filter/network/local_rate_limit/v2alpha:pkg", "//envoy/config/filter/network/mongo_proxy/v2:pkg", "//envoy/config/filter/network/mysql_proxy/v1alpha1:pkg", "//envoy/config/filter/network/rate_limit/v2:pkg", "//envoy/config/filter/network/rbac/v2:pkg", "//envoy/config/filter/network/redis_proxy/v2:pkg", "//envoy/config/filter/network/sni_cluster/v2:pkg", "//envoy/config/filter/network/tcp_proxy/v2:pkg", "//envoy/config/filter/network/thrift_proxy/v2alpha1:pkg", "//envoy/config/filter/network/zookeeper_proxy/v1alpha1:pkg", "//envoy/config/filter/thrift/rate_limit/v2alpha1:pkg", "//envoy/config/filter/udp/udp_proxy/v2alpha:pkg", "//envoy/config/grpc_credential/v2alpha:pkg", "//envoy/config/listener/v2:pkg", "//envoy/config/metrics/v2:pkg", "//envoy/config/overload/v2alpha:pkg", "//envoy/config/ratelimit/v2:pkg", "//envoy/config/rbac/v2:pkg", "//envoy/config/retry/omit_host_metadata/v2:pkg", "//envoy/config/retry/previous_priorities:pkg", "//envoy/config/trace/v2:pkg", 
"//envoy/config/trace/v2alpha:pkg", "//envoy/config/transport_socket/alts/v2alpha:pkg", "//envoy/config/transport_socket/raw_buffer/v2:pkg", "//envoy/config/transport_socket/tap/v2alpha:pkg", "//envoy/data/accesslog/v2:pkg", "//envoy/data/cluster/v2alpha:pkg", "//envoy/data/core/v2alpha:pkg", "//envoy/data/dns/v2alpha:pkg", "//envoy/data/tap/v2alpha:pkg", "//envoy/service/accesslog/v2:pkg", "//envoy/service/auth/v2:pkg", "//envoy/service/discovery/v2:pkg", "//envoy/service/event_reporting/v2alpha:pkg", "//envoy/service/load_stats/v2:pkg", "//envoy/service/metrics/v2:pkg", "//envoy/service/ratelimit/v2:pkg", "//envoy/service/status/v2:pkg", "//envoy/service/tap/v2alpha:pkg", "//envoy/service/trace/v2:pkg", "//envoy/type:pkg", "//envoy/type/matcher:pkg", "//envoy/type/metadata/v2:pkg", "//envoy/type/tracing/v2:pkg", ], ) ================================================ FILE: api/xds_protocol.rst ================================================ .. _xds_protocol: xDS REST and gRPC protocol ========================== Envoy discovers its various dynamic resources via the filesystem or by querying one or more management servers. Collectively, these discovery services and their corresponding APIs are referred to as *xDS*. Resources are requested via *subscriptions*, by specifying a filesystem path to watch, initiating gRPC streams, or polling a REST-JSON URL. The latter two methods involve sending requests with a :ref:`DiscoveryRequest ` proto payload. Resources are delivered in a :ref:`DiscoveryResponse ` proto payload in all methods. We discuss each type of subscription below. Resource Types -------------- Every configuration resource in the xDS API has a type associated with it. Resource types follow a :repo:`versioning scheme `. Resource types are versioned independent of the transports described below. 
The following v2 xDS resource types are supported: - :ref:`envoy.api.v2.Listener ` - :ref:`envoy.api.v2.RouteConfiguration ` - :ref:`envoy.api.v2.ScopedRouteConfiguration ` - :ref:`envoy.api.v2.route.VirtualHost ` - :ref:`envoy.api.v2.Cluster ` - :ref:`envoy.api.v2.ClusterLoadAssignment ` - :ref:`envoy.api.v2.Auth.Secret ` - :ref:`envoy.service.discovery.v2.Runtime ` The following v3 xdS resource types are supported: - :ref:`envoy.config.listener.v3.Listener ` - :ref:`envoy.config.route.v3.RouteConfiguration ` - :ref:`envoy.config.route.v3.ScopedRouteConfiguration ` - :ref:`envoy.config.route.v3.VirtualHost ` - :ref:`envoy.config.cluster.v3.Cluster ` - :ref:`envoy.config.endpoint.v3.ClusterLoadAssignment ` - :ref:`envoy.extensions.transport_sockets.tls.v3.Secret ` - :ref:`envoy.service.runtime.v3.Runtime ` The concept of `type URLs `_ appears below, and takes the form `type.googleapis.com/` -- e.g., `type.googleapis.com/envoy.api.v2.Cluster` for a `Cluster` resource. In various requests from Envoy and responses by the management server, the resource type URL is stated. Filesystem subscriptions ------------------------ The simplest approach to delivering dynamic configuration is to place it at a well known path specified in the :ref:`ConfigSource `. Envoy will use `inotify` (`kqueue` on macOS) to monitor the file for changes and parse the :ref:`DiscoveryResponse ` proto in the file on update. Binary protobufs, JSON, YAML and proto text are supported formats for the :ref:`DiscoveryResponse `. There is no mechanism available for filesystem subscriptions to ACK/NACK updates beyond stats counters and logs. The last valid configuration for an xDS API will continue to apply if an configuration update rejection occurs. .. 
_xds_protocol_streaming_grpc_subscriptions: Streaming gRPC subscriptions ---------------------------- API flow ~~~~~~~~ For typical HTTP routing scenarios, the core resource types for the client's configuration are `Listener`, `RouteConfiguration`, `Cluster`, and `ClusterLoadAssignment`. Each `Listener` resource may point to a `RouteConfiguration` resource, which may point to one or more `Cluster` resources, and each `Cluster` resource may point to a `ClusterLoadAssignment` resource. Envoy fetches all `Listener` and `Cluster` resources at startup. It then fetches whatever `RouteConfiguration` and `ClusterLoadAssignment` resources that are required by the `Listener` and `Cluster` resources. In effect, every `Listener` or `Cluster` resource is a root to part of Envoy's configuration tree. A non-proxy client such as gRPC might start by fetching only the specific `Listener` resources that it is interested in. It then fetches the `RouteConfiguration` resources required by those `Listener` resources, followed by whichever `Cluster` resources are required by those `RouteConfiguration` resources, followed by the `ClusterLoadAssignment` resources required by the `Cluster` resources. In effect, the original `Listener` resources are the roots to the client's configuration tree. Variants of the xDS Transport Protocol ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Four Variants ^^^^^^^^^^^^^ There are four variants of the xDS transport protocol used via streaming gRPC, which cover all combinations of two dimensions. The first dimension is State of the World (SotW) vs. incremental. The SotW approach was the original mechanism used by xDS, in which the client must specify all resource names it is interested in with each request (except when making a wildcard request in LDS/CDS), and the server must return all resources the client has subscribed to in each request (in LDS/CDS). 
This means that if the client is already subscribing to 99 resources and wants to add an additional one, it must send a request with all 100 resource names, rather than just the one new one. And the server must then respond by sending all 100 resources, even if the 99 that were already subscribed to have not changed (in LDS/CDS). This mechanism can be a scalability limitation, which is why the incremental protocol variant was introduced. The incremental approach allows both the client and server to indicate only deltas relative to their previous state -- i.e., the client can say that it wants to add or remove its subscription to a particular resource name without resending those that have not changed, and the server can send updates only for those resources that have changed. The incremental protocol also provides a mechanism for lazy loading of resources. For details on the incremental protocol, see :ref:`Incremental xDS ` below. The second dimension is using a separate gRPC stream for each resource type vs. aggregating all resource types onto a single gRPC stream. The former approach was the original mechanism used by xDS, and it offers an eventual consistency model. The latter approach was added for environments in which explicit control of sequencing is required. For details, see :ref:`Eventual consistency considerations ` below. So, the four variants of the xDS transport protocol are: 1. State of the World (Basic xDS): SotW, separate gRPC stream for each resource type 2. Incremental xDS: incremental, separate gRPC stream for each resource type 3. Aggregated Discovery Service (ADS): SotW, aggregate stream for all resource types 4. Incremental ADS: incremental, aggregate stream for all resource types RPC Services and Methods for Each Variant ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For the non-aggregated protocol variants, there is a separate RPC service for each resource type. 
Each of these RPC services can provide a method for each of the SotW and Incremental protocol variants. Here are the RPC services and methods for each resource type: - Listener: Listener Discovery Service (LDS) - SotW: ListenerDiscoveryService.StreamListeners - Incremental: ListenerDiscoveryService.DeltaListeners - RouteConfiguration: Route Discovery Service (RDS) - SotW: RouteDiscoveryService.StreamRoutes - Incremental: RouteDiscoveryService.DeltaRoutes - ScopedRouteConfiguration: Scoped Route Discovery Service (SRDS) - SotW: ScopedRouteDiscoveryService.StreamScopedRoutes - Incremental: ScopedRouteDiscoveryService.DeltaScopedRoutes - VirtualHost: Virtual Host Discovery Service (VHDS) - SotW: N/A - Incremental: VirtualHostDiscoveryService.DeltaVirtualHosts - Cluster: Cluster Discovery Service (CDS) - SotW: ClusterDiscoveryService.StreamClusters - Incremental: ClusterDiscoveryService.DeltaClusters - ClusterLoadAssignment: Endpoint Discovery Service (EDS) - SotW: EndpointDiscoveryService.StreamEndpoints - Incremental: EndpointDiscoveryService.DeltaEndpoints - Secret: Secret Discovery Service (SDS) - SotW: SecretDiscoveryService.StreamSecrets - Incremental: SecretDiscoveryService.DeltaSecrets - Runtime: Runtime Discovery Service (RTDS) - SotW: RuntimeDiscoveryService.StreamRuntime - Incremental: RuntimeDiscoveryService.DeltaRuntime In the aggregated protocol variants, all resource types are multiplexed on a single gRPC stream, where each resource type is treated as a separate logical stream within the aggregated stream. In effect, it simply combines all of the above separate APIs into a single stream by treating requests and responses for each resource type as a separate sub-stream on the single aggregated stream. 
The RPC service and methods for the aggregated protocol variants are: - SotW: AggregatedDiscoveryService.StreamAggregatedResources - Incremental: AggregatedDiscoveryService.DeltaAggregatedResources For all of the SotW methods, the request type is :ref:`DiscoveryRequest ` and the response type is :ref:`DiscoveryResponse `. For all of the incremental methods, the request type is :ref:`DeltaDiscoveryRequest ` and the response type is :ref:`DeltaDiscoveryResponse `. Configuring Which Variant to Use ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In the xDS API, the :ref:`ConfigSource ` message indicates how to obtain resources of a particular type. If the :ref:`ConfigSource ` contains a gRPC :ref:`ApiConfigSource `, it points to an upstream cluster for the management server; this will initiate an independent bidirectional gRPC stream for each xDS resource type, potentially to distinct management servers. If the :ref:`ConfigSource ` contains a :ref:`AggregatedConfigSource `, it tells the client to use :ref:`ADS `. Currently, the client is expected to be given some local configuration that tells it how to obtain the :ref:`Listener ` and :ref:`Cluster ` resources. :ref:`Listener ` resources may include a :ref:`ConfigSource ` that indicates how the :ref:`RouteConfiguration ` resources are obtained, and :ref:`Cluster ` resources may include a :ref:`ConfigSource ` that indicates how the :ref:`ClusterLoadAssignment ` resources are obtained. Client Configuration """""""""""""""""""" In Envoy, the bootstrap file contains two :ref:`ConfigSource ` messages, one indicating how :ref:`Listener ` resources are obtained and another indicating how :ref:`Cluster ` resources are obtained. 
It also contains a separate :ref:`ApiConfigSource ` message indicating how to contact the ADS server, which will be used whenever a :ref:`ConfigSource ` message (either in the bootstrap file or in a :ref:`Listener ` or :ref:`Cluster ` resource obtained from a management server) contains an :ref:`AggregatedConfigSource ` message. In a gRPC client that uses xDS, only ADS is supported, and the bootstrap file contains the name of the ADS server, which will be used for all resources. The :ref:`ConfigSource ` messages in the :ref:`Listener ` and :ref:`Cluster ` resources must contain :ref:`AggregatedConfigSource ` messages. The xDS transport Protocol ~~~~~~~~~~~~~~~~~~~~~~~~~~ Transport API version ^^^^^^^^^^^^^^^^^^^^^ In addition the resource type version described above, the xDS wire protocol has a transport version associated with it. This provides type versioning for messages such as :ref:`DiscoveryRequest ` and :ref:`DiscoveryResponse `. It is also encoded in the gRPC method name, so a server can determine which version a client is speaking based on which method it calls. Basic Protocol Overview ^^^^^^^^^^^^^^^^^^^^^^^ Each xDS stream begins with a :ref:`DiscoveryRequest ` from the client, which specifies the list of resources to subscribe to, the type URL corresponding to the subscribed resources, the node identifier, and an optional resource type instance version indicating the most recent version of the resource type that the client has already seen (see :ref:`ACK/NACK and resource type instance version ` for details). The server will then send a :ref:`DiscoveryResponse ` containing any resources that the client has subscribed to that have changed since the last resource type instance version that the client indicated it has seen. The server may send additional responses at any time when the subscribed resources change. 
Whenever the client receives a new response, it will send another request indicating whether or not the resources in the response were valid (see :ref:`ACK/NACK and resource type instance version ` for details). Only the first request on a stream is guaranteed to carry the node identifier. The subsequent discovery requests on the same stream may carry an empty node identifier. This holds true regardless of the acceptance of the discovery responses on the same stream. The node identifier should always be identical if present more than once on the stream. It is sufficient to only check the first message for the node identifier as a result. .. _xds_ack_nack: ACK/NACK and resource type instance version ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Every xDS resource type has a version string that indicates the version for that resource type. Whenever one resource of that type changes, the version is changed. In a responses sent by the xDS server, the :ref:`version_info` field indicates the current version for that resource type. The client then sends another request to the server with the :ref:`version_info` field indicating the most recent valid version seen by the client. This provides a way for the server to determine when it sends a version that the client considers invalid. (In the :ref:`incremental protocol variants `, the resource type instance version is sent by the server in the :ref:`system_version_info` field. However, this information is not actually used by the client to communicate which resources are valid, because the incremental API variants have a separate mechanism for that.) The resource type instance version is separate for each resource type. When using the aggregated protocol variants, each resource type has its own version even though all resource types are being sent on the same stream. The resource type is also separate for each xDS server (where an xDS server is identified by a unique :ref:`ConfigSource `). 
When obtaining resources of a given type from multiple xDS servers, each xDS server will have a different notion of version. Note that the version for a resource type is not a property of an individual xDS stream but rather a property of the resources themselves. If the stream becomes broken and the client creates a new stream, the client's initial request on the new stream should indicate the most recent version seen by the client on the previous stream. An example EDS request might be: .. code:: yaml version_info: node: { id: envoy } resource_names: - foo - bar type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment response_nonce: The management server may reply either immediately or when the requested resources are available with a :ref:`DiscoveryResponse `, e.g.: .. code:: yaml version_info: X resources: - foo ClusterLoadAssignment proto encoding - bar ClusterLoadAssignment proto encoding type_url: type.googleapis.com/envoy.api.v2.ClusterLoadAssignment nonce: A After processing the :ref:`DiscoveryResponse `, Envoy will send a new request on the stream, specifying the last version successfully applied and the nonce provided by the management server. The version provides Envoy and the management server a shared notion of the currently applied configuration, as well as a mechanism to ACK/NACK configuration updates. ACK ^^^ If the update was successfully applied, the :ref:`version_info ` will be **X**, as indicated in the sequence diagram: .. figure:: diagrams/simple-ack.svg :alt: Version update after ACK NACK ^^^^ If Envoy had instead rejected configuration update **X**, it would reply with :ref:`error_detail ` populated and its previous version, which in this case was the empty initial version. The :ref:`error_detail ` has more details around the exact error message populated in the message field: .. 
figure:: diagrams/simple-nack.svg :alt: No version update after NACK In the sequence diagrams, the following format is used to abbreviate messages: - *DiscoveryRequest*: (V=version_info,R=resource_names,N=response_nonce,T=type_url) - *DiscoveryResponse*: (V=version_info,R=resources,N=nonce,T=type_url) After a NACK, an API update may succeed at a new version **Y**: .. figure:: diagrams/later-ack.svg :alt: ACK after NACK ACK and NACK semantics summary ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - The xDS client should ACK or NACK every :ref:`DiscoveryResponse ` received from the management server. - Like all other requests, the nonce from the :ref:`DiscoveryResponse ` is sent as :ref:`response_nonce `. As described in :ref:`resource update ` the nonce is used in certain race conditions to disambiguate between ACK and NACK. - ACK signifies successful configuration update and contains the :ref:`version_info ` from the :ref:`DiscoveryResponse `. - NACK signifies unsuccessful configuration update and contains the previous (existing) :ref:`version_info `. - Only the NACK should populate the :ref:`error_detail `. .. _xds_protocol_resource_update: When to send an update ^^^^^^^^^^^^^^^^^^^^^^ The management server should only send updates to the Envoy client when the resources in the :ref:`DiscoveryResponse ` have changed. Envoy replies to any :ref:`DiscoveryResponse ` with a :ref:`DiscoveryRequest ` containing the ACK/NACK immediately after it has been either accepted or rejected. If the management server provides the same set of resources rather than waiting for a change to occur, it will cause needless work on both the client and the management server, which could have a severe performance impact. Within a stream, new :ref:`DiscoveryRequests ` supersede any prior :ref:`DiscoveryRequests ` having the same resource type. This means that the management server only needs to respond to the latest :ref:`DiscoveryRequest ` on each stream for any given resource type. .. 
_xds_protocol_resource_hints: How the client specifies what resources to return ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ xDS requests allow the client to specify a set of resource names as a hint to the server about which resources the client is interested in. In the SotW protocol variants, this is done via the :ref:`resource_names ` specified in the :ref:`DiscoveryRequest `; in the incremental protocol variants, this is done via the :ref:`resource_names_subscribe ` and :ref:`resource_names_unsubscribe ` fields in the :ref:`DeltaDiscoveryRequest `. Normally (see below for exceptions), requests must specify the set of resource names that the client is interested in. The management server must supply the requested resources if they exist. The client will silently ignore any supplied resources that were not explicitly requested. When the client sends a new request that changes the set of resources being requested, the server must resend any newly requested resources, even if it previously sent those resources without having been asked for them and the resources have not changed since that time. If the list of resource names becomes empty, that means that the client is no longer interested in any resources of the specified type. For :ref:`Listener ` and :ref:`Cluster ` resource types, there is also a "wildcard" mode, which is triggered when the initial request on the stream for that resource type contains no resource names. In this case, the server should use site-specific business logic to determine the full set of resources that the client is interested in, typically based on the client's :ref:`node ` identification. Note that once a stream has entered wildcard mode for a given resource type, there is no way to change the stream out of wildcard mode; resource names specified in any subsequent request on the stream will be ignored. Client Behavior """"""""""""""" Envoy will always use wildcard mode for :ref:`Listener ` and :ref:`Cluster ` resources. 
However, other xDS clients (such as gRPC clients that use xDS) may specify explicit resource names for these resource types, for example if they only have a singleton listener and already know its name from some out-of-band configuration. Grouping Resources into Responses ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In the incremental protocol variants, the server sends each resource in its own response. This means that if the server has previously sent 100 resources and only one of them has changed, it may send a response containing only the changed resource; it does not need to resend the 99 resources that have not changed, and the client must not delete the unchanged resources. In the SotW protocol variants, all resource types except for :ref:`Listener ` and :ref:`Cluster ` are grouped into responses in the same way as in the incremental protocol variants. However, :ref:`Listener ` and :ref:`Cluster ` resource types are handled differently: the server must include the complete state of the world, meaning that all resources of the relevant type that are needed by the client must be included, even if they did not change since the last response. This means that if the server has previously sent 100 resources and only one of them has changed, it must resend all 100 of them, even the 99 that were not modified. Note that all of the protocol variants operate on units of whole named resources. There is no mechanism for providing incremental updates of repeated fields within a named resource. Most notably, there is currently no mechanism for incrementally updating individual endpoints within an EDS response. Duplicate Resource Names ^^^^^^^^^^^^^^^^^^^^^^^^ It is an error for a server to send a single response that contains the same resource name twice. Clients should NACK responses that contain multiple instances of the same resource name. 
Deleting Resources ^^^^^^^^^^^^^^^^^^ In the incremental proocol variants, the server signals the client that a resource should be deleted via the :ref:`removed_resources ` field of the response. This tells the client to remove the resource from its local cache. In the SotW protocol variants, the criteria for deleting resources is more complex. For :ref:`Listener ` and :ref:`Cluster ` resource types, if a previously seen resource is not present in a new response, that indicates that the resource has been removed, and the client must delete it; a response containing no resources means to delete all resources of that type. However, for other resource types, the API provides no mechanism for the server to tell the client that resources have been deleted; instead, deletions are indicated implicitly by parent resources being changed to no longer refer to a child resource. For example, when the client receives an LDS update removing a :ref:`Listener ` that was previously pointing to :ref:`RouteConfiguration ` A, if no other :ref:`Listener ` is pointing to :ref:`RouteConfiguration ` A, then the client may delete A. For those resource types, an empty :ref:`DiscoveryResponse ` is effectively a no-op from the client's perspective. Knowing When a Requested Resource Does Not Exist ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The SotW protocol variants do not provide any explicit mechanism to determine when a requested resource does not exist. Responses for :ref:`Listener ` and :ref:`Cluster ` resource types must include all resources requested by the client. 
However, it may not be possible for the client to know that a resource does not exist based solely on its absence in a response, because the delivery of the updates is eventually consistent: if the client initially sends a request for resource A, then sends a request for resources A and B, and then sees a response containing only resource A, the client cannot conclude that resource B does not exist, because the response may have been sent on the basis of the first request, before the server saw the second request. For other resource types, because each resource can be sent in its own response, there is no way to know from the next response whether the newly requested resource exists, because the next response could be an unrelated update for another resource that had already been subscribed to previously. As a result, clients are expected to use a timeout (recommended duration is 15 seconds) after sending a request for a new resource, after which they will consider the requested resource to not exist if they have not received the resource. In Envoy, this is done for :ref:`RouteConfiguration ` and :ref:`ClusterLoadAssignment ` resources during :ref:`resource warming `. Note that this timeout is not strictly necessary when using wildcard mode for :ref:`Listener ` and :ref:`Cluster ` resource types, because in that case every response will contain all existing resources that are relevant to the client, so the client can know that a resource does not exist by its absence in the next response it sees. However, using a timeout is still recommended in this case, since it protects against the case where the management server fails to send a response in a timely manner. Note that even if a requested resource does not exist at the moment when the client requests it, that resource could be created at any time. 
Management servers must remember the set of resources being requested by the client, and if one of those resources springs into existence later, the server must send an update to the client informing it of the new resource. Clients that initially see a resource that does not exist must be prepared for the resource to be created at any time. Unsubscribing From Resources ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In the incremental protocol variants, resources can be unsubscribed to via the :ref:`resource_names_unsubscribe ` field. In the SotW protocol variants, each request must contain the full list of resource names being subscribed to in the :ref:`resource_names ` field, so unsubscribing to a set of resources is done by sending a new request containing all resource names that are still being subscribed to but not containing the resource names being unsubscribed to. For example, if the client had previously been subscribed to resources A and B but wishes to unsubscribe from B, it must send a new request containing only resource A. Note that for :ref:`Listener ` and :ref:`Cluster ` resource types where the stream is in "wildcard" mode (see :ref:`How the client specifies what resources to return ` for details), the set of resources being subscribed to is determined by the server instead of the client, so there is no mechanism for the client to unsubscribe from resources. Requesting Multiple Resources on a Single Stream ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ For EDS/RDS, Envoy may either generate a distinct stream for each resource of a given type (e.g. if each :ref:`ConfigSource ` has its own distinct upstream cluster for a management server), or may combine together multiple resource requests for a given resource type when they are destined for the same management server. While this is left to implementation specifics, management servers should be capable of handling one or more :ref:`resource_names ` for a given resource type in each request. 
Both sequence diagrams below are valid for fetching two EDS resources `{foo, bar}`: |Multiple EDS requests on the same stream| |Multiple EDS requests on distinct streams| Resource updates ^^^^^^^^^^^^^^^^ As discussed above, Envoy may update the list of :ref:`resource_names ` it presents to the management server in each :ref:`DiscoveryRequest ` that ACK/NACKs a specific :ref:`DiscoveryResponse `. In addition, Envoy may later issue additional :ref:`DiscoveryRequests ` at a given :ref:`version_info ` to update the management server with new resource hints. For example, if Envoy is at EDS version **X** and knows only about cluster ``foo``, but then receives a CDS update and learns about ``bar`` in addition, it may issue an additional :ref:`DiscoveryRequest ` for **X** with `{foo,bar}` as `resource_names`. .. figure:: diagrams/cds-eds-resources.svg :alt: CDS response leads to EDS resource hint update There is a race condition that may arise here; if after a resource hint update is issued by Envoy at **X**, but before the management server processes the update it replies with a new version **Y**, the resource hint update may be interpreted as a rejection of **Y** by presenting an **X** :ref:`version_info `. To avoid this, the management server provides a ``nonce`` that Envoy uses to indicate the specific :ref:`DiscoveryResponse ` each :ref:`DiscoveryRequest ` corresponds to: .. figure:: diagrams/update-race.svg :alt: EDS update race motivates nonces The management server should not send a :ref:`DiscoveryResponse ` for any :ref:`DiscoveryRequest ` that has a stale nonce. A nonce becomes stale following a newer nonce being presented to Envoy in a :ref:`DiscoveryResponse `. A management server does not need to send an update until it determines a new version is available. Earlier requests at a version then also become stale. It may process multiple :ref:`DiscoveryRequests ` at a version until a new version is ready. .. 
figure:: diagrams/stale-requests.svg :alt: Requests become stale An implication of the above resource update sequencing is that Envoy does not expect a :ref:`DiscoveryResponse ` for every :ref:`DiscoveryRequests ` it issues. .. _xds_protocol_resource_warming: Resource warming ~~~~~~~~~~~~~~~~ :ref:`Clusters ` and :ref:`Listeners ` go through warming before they can serve requests. This process happens both during :ref:`Envoy initialization ` and when the `Cluster` or `Listener` is updated. Warming of `Cluster` is completed only when a `ClusterLoadAssignment` response is supplied by management server. Similarly, warming of `Listener` is completed only when a `RouteConfiguration` is supplied by management server if the listener refers to an RDS configuration. Management server is expected to provide the EDS/RDS updates during warming. If management server does not provide EDS/RDS responses, Envoy will not initialize itself during the initialization phase and the updates sent via CDS/LDS will not take effect until EDS/RDS responses are supplied. .. _xds_protocol_eventual_consistency_considerations: Eventual consistency considerations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Since Envoy's xDS APIs are eventually consistent, traffic may drop briefly during updates. For example, if only cluster **X** is known via CDS/EDS, a `RouteConfiguration` references cluster **X** and is then adjusted to cluster **Y** just before the CDS/EDS update providing **Y**, traffic will be blackholed until **Y** is known about by the Envoy instance. For some applications, a temporary drop of traffic is acceptable, retries at the client or by other Envoy sidecars will hide this drop. For other scenarios where drop can't be tolerated, traffic drop could have been avoided by providing a CDS/EDS update with both **X** and **Y**, then the RDS update repointing from **X** to **Y** and then a CDS/EDS update dropping **X**. 
In general, to avoid traffic drop, sequencing of updates should follow a make before break model, wherein: - CDS updates (if any) must always be pushed first. - EDS updates (if any) must arrive after CDS updates for the respective clusters. - LDS updates must arrive after corresponding CDS/EDS updates. - RDS updates related to the newly added listeners must arrive after CDS/EDS/LDS updates. - VHDS updates (if any) related to the newly added RouteConfigurations must arrive after RDS updates. - Stale CDS clusters and related EDS endpoints (ones no longer being referenced) can then be removed. xDS updates can be pushed independently if no new clusters/routes/listeners are added or if it's acceptable to temporarily drop traffic during updates. Note that in case of LDS updates, the listeners will be warmed before they receive traffic, i.e. the dependent routes are fetched through RDS if configured. Clusters are warmed when adding/removing/updating clusters. On the other hand, routes are not warmed, i.e., the management plane must ensure that clusters referenced by a route are in place, before pushing the updates for a route. .. _xds_protocol_ads: Aggregated Discovery Service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It's challenging to provide the above guarantees on sequencing to avoid traffic drop when management servers are distributed. ADS allows a single management server, via a single gRPC stream, to deliver all API updates. This provides the ability to carefully sequence updates to avoid traffic drop. With ADS, a single stream is used with multiple independent :ref:`DiscoveryRequest `/:ref:`DiscoveryResponse ` sequences multiplexed via the type URL. For any given type URL, the above sequencing of :ref:`DiscoveryRequest ` and :ref:`DiscoveryResponse ` messages applies. An example update sequence might look like: .. figure:: diagrams/ads.svg :alt: EDS/CDS multiplexed on an ADS stream A single ADS stream is available per Envoy instance. 
An example minimal ``bootstrap.yaml`` fragment for ADS configuration is: .. code:: yaml node: id: dynamic_resources: cds_config: {ads: {}} lds_config: {ads: {}} ads_config: api_type: GRPC grpc_services: envoy_grpc: cluster_name: ads_cluster static_resources: clusters: - name: ads_cluster connect_timeout: { seconds: 5 } type: STATIC hosts: - socket_address: address: port_value: lb_policy: ROUND_ROBIN # It is recommended to configure either HTTP/2 or TCP keepalives in order to detect # connection issues, and allow Envoy to reconnect. TCP keepalive is less expensive, but # may be inadequate if there is a TCP proxy between Envoy and the management server. # HTTP/2 keepalive is slightly more expensive, but may detect issues through more types # of intermediate proxies. http2_protocol_options: connection_keepalive: interval: 30s timeout: 5s upstream_connection_options: tcp_keepalive: ... admin: ... .. _xds_protocol_delta: Incremental xDS ~~~~~~~~~~~~~~~ Incremental xDS is a separate xDS endpoint that: - Allows the protocol to communicate on the wire in terms of resource/resource name deltas ("Delta xDS"). This supports the goal of scalability of xDS resources. Rather than deliver all 100k clusters when a single cluster is modified, the management server only needs to deliver the single cluster that changed. - Allows the Envoy to on-demand / lazily request additional resources. For example, requesting a cluster only when a request for that cluster arrives. An Incremental xDS session is always in the context of a gRPC bidirectional stream. This allows the xDS server to keep track of the state of xDS clients connected to it. There is no REST version of Incremental xDS yet. In the delta xDS wire protocol, the nonce field is required and used to pair a :ref:`DeltaDiscoveryResponse ` to a :ref:`DeltaDiscoveryRequest ` ACK or NACK. Optionally, a response message level :ref:`system_version_info ` is present for debugging purposes only. 
:ref:`DeltaDiscoveryRequest ` can be sent in the following situations: - Initial message in a xDS bidirectional gRPC stream. - As an ACK or NACK response to a previous :ref:`DeltaDiscoveryResponse `. In this case the :ref:`response_nonce ` is set to the nonce value in the Response. ACK or NACK is determined by the absence or presence of :ref:`error_detail `. - Spontaneous :ref:`DeltaDiscoveryRequests ` from the client. This can be done to dynamically add or remove elements from the tracked :ref:`resource_names ` set. In this case :ref:`response_nonce ` must be omitted. In this first example the client connects and receives a first update that it ACKs. The second update fails and the client NACKs the update. Later the xDS client spontaneously requests the "wc" resource. .. figure:: diagrams/incremental.svg :alt: Incremental session example On reconnect the Incremental xDS client may tell the server of its known resources to avoid resending them over the network. Because no state is assumed to be preserved from the previous stream, the reconnecting client must provide the server with all resource names it is interested in. .. figure:: diagrams/incremental-reconnect.svg :alt: Incremental reconnect example Resource names ^^^^^^^^^^^^^^ Resources are identified by a resource name or an alias. Aliases of a resource, if present, can be identified by the alias field in the resource of a :ref:`DeltaDiscoveryResponse `. The resource name will be returned in the name field in the resource of a :ref:`DeltaDiscoveryResponse `. .. _xds_protocol_delta_subscribe: Subscribing to Resources ^^^^^^^^^^^^^^^^^^^^^^^^ The client can send either an alias or the name of a resource in the :ref:`resource_names_subscribe ` field of a :ref:`DeltaDiscoveryRequest ` in order to subscribe to a resource. Both the names and aliases of resources should be checked in order to determine whether the entity in question has been subscribed to. 
A :ref:`resource_names_subscribe ` field may contain resource names that the server believes the client is already subscribed to, and furthermore has the most recent versions of. However, the server *must* still provide those resources in the response; due to implementation details hidden from the server, the client may have "forgotten" those resources despite apparently remaining subscribed. .. _xds_protocol_unsubscribe: Unsubscribing from Resources ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When a client loses interest in some resources, it will indicate that with the :ref:`resource_names_unsubscribe ` field of a :ref:`DeltaDiscoveryRequest `. As with :ref:`resource_names_subscribe `, these may be resource names or aliases. A :ref:`resource_names_unsubscribe ` field may contain superfluous resource names, which the server thought the client was already not subscribed to. The server must cleanly process such a request; it can simply ignore these phantom unsubscriptions. Knowing When a Requested Resource Does Not Exist ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When a resource subscribed to by a client does not exist, the server will send a :ref:`Resource ` whose :ref:`name ` field matches the name that the client subscribed to and whose :ref:`resource ` field is unset. This allows the client to quickly determine when a resource does not exist without waiting for a timeout, as would be done in the SotW protocol variants. However, clients are still encouraged to use a timeout to protect against the case where the management server fails to send a response in a timely manner. REST-JSON polling subscriptions ------------------------------- Synchronous (long) polling via REST endpoints is also available for the xDS singleton APIs. The above sequencing of messages is similar, except no persistent stream is maintained to the management server. 
It is expected that there is only a single outstanding request at any point in time, and as a result the response nonce is optional in REST-JSON. The `JSON canonical transform of proto3 `__ is used to encode :ref:`DiscoveryRequest ` and :ref:`DiscoveryResponse ` messages. ADS is not available for REST-JSON polling. When the poll period is set to a small value, with the intention of long polling, then there is also a requirement to avoid sending a :ref:`DiscoveryResponse ` unless a change to the underlying resources has occurred via a :ref:`resource update `. .. |Multiple EDS requests on the same stream| image:: diagrams/eds-same-stream.svg .. |Multiple EDS requests on distinct streams| image:: diagrams/eds-distinct-stream.svg ================================================ FILE: bazel/BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library", "cc_proto_library") load("//bazel:envoy_build_system.bzl", "envoy_package") load("//bazel:envoy_internal.bzl", "envoy_select_force_libcpp") licenses(["notice"]) # Apache 2 envoy_package() exports_files([ "gen_sh_test_runner.sh", "sh_test_wrapper.sh", "test_for_benchmark_wrapper.sh", ]) genrule( name = "gnu_build_id", outs = ["gnu_build_id.ldscript"], cmd = """ echo --build-id=0x$$( grep BUILD_SCM_REVISION bazel-out/volatile-status.txt \\ | sed 's/^BUILD_SCM_REVISION //') \\ > $@ """, # Undocumented attr to depend on workspace status files. # https://github.com/bazelbuild/bazel/issues/4942 stamp = 1, ) # For macOS, which doesn't have GNU ld's `--build-id` flag. genrule( name = "raw_build_id", outs = ["raw_build_id.ldscript"], cmd = """ grep BUILD_SCM_REVISION bazel-out/volatile-status.txt \\ | sed 's/^BUILD_SCM_REVISION //' \\ | tr -d '\\n' \\ > $@ """, # Undocumented attr to depend on workspace status files. # https://github.com/bazelbuild/bazel/issues/4942 stamp = 1, ) # A target to optionally link C++ standard library dynamically in sanitizer runs. 
# TSAN doesn't support libc/libstdc++ static linking per doc: # http://releases.llvm.org/8.0.1/tools/clang/docs/ThreadSanitizer.html cc_library( name = "dynamic_stdlib", linkopts = envoy_select_force_libcpp( ["-lc++"], ["-lstdc++"], ), ) cc_library( name = "static_stdlib", linkopts = select({ "//bazel:linux": ["-static-libgcc"], "//conditions:default": [], }), ) config_setting( name = "windows_opt_build", values = { "cpu": "x64_windows", "compilation_mode": "opt", }, ) config_setting( name = "windows_dbg_build", values = { "cpu": "x64_windows", "compilation_mode": "dbg", }, ) config_setting( name = "windows_fastbuild_build", values = { "cpu": "x64_windows", "compilation_mode": "fastbuild", }, ) config_setting( name = "opt_build", values = {"compilation_mode": "opt"}, ) config_setting( name = "fastbuild_build", values = {"compilation_mode": "fastbuild"}, ) config_setting( name = "dbg_build", values = {"compilation_mode": "dbg"}, ) config_setting( name = "no_debug_info", values = {"define": "no_debug_info=1"}, ) config_setting( name = "asan_build", values = {"define": "ENVOY_CONFIG_ASAN=1"}, ) config_setting( name = "tsan_build", values = {"define": "ENVOY_CONFIG_TSAN=1"}, ) config_setting( name = "msan_build", values = {"define": "ENVOY_CONFIG_MSAN=1"}, ) config_setting( name = "coverage_build", values = {"define": "ENVOY_CONFIG_COVERAGE=1"}, ) config_setting( name = "clang_build", flag_values = { "@bazel_tools//tools/cpp:compiler": "clang", }, ) config_setting( name = "gcc_build", flag_values = { "@bazel_tools//tools/cpp:compiler": "gcc", }, ) config_setting( name = "dynamic_link_tests", values = { "define": "dynamic_link_tests=true", }, ) config_setting( name = "disable_tcmalloc", values = {"define": "tcmalloc=disabled"}, ) config_setting( name = "debug_tcmalloc", values = {"define": "tcmalloc=debug"}, ) config_setting( name = "gperftools_tcmalloc", values = {"define": "tcmalloc=gperftools"}, ) # As select() can't be nested we need these specialized settings to 
avoid ambiguity when choosing # tcmalloc's flavor for x86_64 builds. config_setting( name = "disable_tcmalloc_on_linux_x86_64", values = { "define": "tcmalloc=disabled", "cpu": "k8", }, ) config_setting( name = "gperftools_tcmalloc_on_linux_x86_64", values = { "define": "tcmalloc=gperftools", "cpu": "k8", }, ) config_setting( name = "debug_tcmalloc_on_linux_x86_64", values = { "define": "tcmalloc=debug", "cpu": "k8", }, ) config_setting( name = "disable_signal_trace", values = {"define": "signal_trace=disabled"}, ) config_setting( name = "disable_object_dump_on_signal_trace", values = {"define": "object_dump_on_signal_trace=disabled"}, ) config_setting( name = "disable_deprecated_features", values = {"define": "deprecated_features=disabled"}, ) config_setting( name = "disable_hot_restart", values = {"define": "hot_restart=disabled"}, ) # Used to avoid conflicting selects https://github.com/bazelbuild/bazel/issues/8323 alias( name = "disable_hot_restart_or_apple", actual = select({ ":apple": ":apple", "//conditions:default": ":disable_hot_restart", }), ) config_setting( name = "disable_google_grpc", values = {"define": "google_grpc=disabled"}, ) config_setting( name = "enable_path_normalization_by_default", values = {"define": "path_normalization_by_default=true"}, ) config_setting( name = "enable_new_codecs_in_integration_tests", values = {"define": "use_new_codecs_in_integration_tests=true"}, ) cc_proto_library( name = "grpc_health_proto", deps = ["@com_github_grpc_grpc//src/proto/grpc/health/v1:_health_proto_only"], ) config_setting( name = "enable_exported_symbols", values = {"define": "exported_symbols=enabled"}, ) config_setting( name = "enable_log_debug_assert_in_release", values = {"define": "log_debug_assert_in_release=enabled"}, ) config_setting( name = "disable_known_issue_asserts", values = {"define": "disable_known_issue_asserts=true"}, ) config_setting( name = "enable_perf_annotation", values = {"define": "perf_annotation=enabled"}, ) config_setting( 
name = "force_libcpp", values = {"define": "force_libcpp=enabled"}, ) config_setting( name = "boringssl_fips", constraint_values = [ "@bazel_tools//platforms:linux", "@bazel_tools//platforms:x86_64", ], values = {"define": "boringssl=fips"}, ) config_setting( name = "boringssl_disabled", values = {"define": "boringssl=disabled"}, ) config_setting( name = "zlib_ng", constraint_values = [ "@bazel_tools//platforms:linux", ], values = {"define": "zlib=ng"}, ) config_setting( name = "enable_quiche", values = {"define": "quiche=enabled"}, ) # TODO: consider converting WAVM VM support to an extension (https://github.com/envoyproxy/envoy/issues/12574) config_setting( name = "wasm_all", values = {"define": "wasm=enabled"}, ) config_setting( name = "wasm_wavm", values = {"define": "wasm=wavm"}, ) config_setting( name = "wasm_v8", values = {"define": "wasm=v8"}, ) config_setting( name = "wasm_none", values = {"define": "wasm=disabled"}, ) # Alias pointing to the selected version of BoringSSL: # - BoringSSL FIPS from @boringssl_fips//:ssl, # - non-FIPS BoringSSL from @boringssl//:ssl. alias( name = "boringssl", actual = select({ "//bazel:boringssl_fips": "@boringssl_fips//:ssl", "//conditions:default": "@boringssl//:ssl", }), ) config_setting( name = "linux_x86_64", values = {"cpu": "k8"}, ) config_setting( name = "linux_aarch64", values = {"cpu": "aarch64"}, ) config_setting( name = "linux_ppc", values = {"cpu": "ppc"}, ) config_setting( name = "linux_s390x", values = {"cpu": "s390x"}, ) config_setting( name = "linux_mips64", values = {"cpu": "mips64"}, ) config_setting( name = "windows_x86_64", values = {"cpu": "x64_windows"}, ) # Configuration settings to make doing selects for Apple vs non-Apple platforms # easier. 
More details: https://docs.bazel.build/versions/master/configurable-attributes.html#config_settingaliasing config_setting( name = "darwin", values = {"cpu": "darwin"}, ) config_setting( name = "darwin_x86_64", values = {"cpu": "darwin_x86_64"}, ) config_setting( name = "ios_i386", values = {"cpu": "ios_i386"}, ) config_setting( name = "ios_x86_64", values = {"cpu": "ios_x86_64"}, ) config_setting( name = "ios_armv7", values = {"cpu": "ios_armv7"}, ) config_setting( name = "ios_armv7s", values = {"cpu": "ios_armv7s"}, ) config_setting( name = "ios_arm64", values = {"cpu": "ios_arm64"}, ) config_setting( name = "ios_arm64e", values = {"cpu": "ios_arm64e"}, ) config_setting( name = "manual_stamp", values = {"define": "manual_stamp=manual_stamp"}, ) config_setting( name = "android_logger", values = {"define": "logger=android"}, ) config_setting( name = "libfuzzer_coverage", define_values = { "FUZZING_ENGINE": "libfuzzer", "ENVOY_CONFIG_COVERAGE": "1", }, ) config_setting( name = "libfuzzer", values = {"define": "FUZZING_ENGINE=libfuzzer"}, ) alias( name = "apple", actual = select( { ":darwin": ":darwin", ":darwin_x86_64": ":darwin_x86_64", ":ios_arm64": ":ios_arm64", ":ios_arm64e": ":ios_arm64e", ":ios_armv7": ":ios_armv7", ":ios_armv7s": ":ios_armv7s", ":ios_i386": ":ios_i386", ":ios_x86_64": ":ios_x86_64", # If we're not on an apple platform return a value that will never match in the select() statement calling this # since it would have already been matched above. "//conditions:default": ":darwin", }, ), ) alias( name = "linux", actual = select( { ":linux_x86_64": ":linux_x86_64", ":linux_aarch64": ":linux_aarch64", ":linux_ppc": ":linux_ppc", ":linux_s390x": "linux_s390x", ":linux_mips64": ":linux_mips64", # If we're not on an linux platform return a value that will never match in the select() statement calling this # since it would have already been matched above. 
"//conditions:default": ":linux_x86_64", }, ), ) alias( name = "x86", actual = select( { ":darwin_x86_64": ":darwin_x86_64", ":ios_x86_64": "ios_x86_64", "linux_x86_64": "linux_x86_64", "windows_x86_64": "windows_x86_64", # If we're not on an x86 platform return a value that will never match in the select() statement calling this since it would have already been matched above. "//conditions:default": ":darwin_x86_64", }, ), ) alias( name = "remote_jdk11", actual = "@bazel_tools//tools/jdk:remote_jdk11", ) ================================================ FILE: bazel/DEVELOPER.md ================================================ # Developer guide for writing Envoy Bazel rules When adding or maintaining Envoy binary, library and test targets, it's necessary to write or modify Bazel `BUILD` files. In general, each directory has a `BUILD` file covering the source files contained immediately in the directory. Some guidelines for defining new targets using the [custom Envoy build rules](../bazel/envoy_build_system.bzl) are provided below. The [Bazel BUILD Encyclopedia](https://bazel.build/versions/master/docs/be/overview.html) provides further details regarding the underlying rules. ## Style guide The [BUILD file style guide](https://bazel.build/versions/master/docs/skylark/build-style.html) is the canonical style reference. The [buildifier](https://github.com/bazelbuild/buildifier) tool automatically enforces these guidelines. In addition, within the `BUILD` file, targets should be sorted alphabetically by their `name` attribute. ## Adding files to the Envoy build All modules that make up the Envoy binary are statically linked at compile time. Many of the modules within Envoy have a pure virtual interface living in [`include/envoy`](../include/envoy), implementation sources in [`source`](../source), mocks in [`test/mocks`](../test/mocks) and unit/integration tests in [`test`](../test). 
The relevant `BUILD` files will require updating or to be added in these locations as you extend Envoy. As an example, consider adding the following interface in `include/envoy/foo/bar.h`: ```c++ #pragma once #include "envoy/buffer/buffer.h" #include "envoy/foo/baz.h" class Bar { public: virtual ~Bar() = default; virtual void someThing() PURE; ... ``` This would require the addition to `include/envoy/foo/BUILD` of the following target: ```python envoy_cc_library( name = "bar_interface", hdrs = ["bar.h"], deps = [ ":baz_interface", "//include/envoy/buffer:buffer_interface", ], ) ``` This declares a new target `bar_interface`, where the convention is that pure virtual interfaces have their targets suffixed with `_interface`. The header `bar.h` is exported to other targets that depend on `//include/envoy/foo:bar_interface`. The interface target itself depends on `baz_interface` (in the same directory, hence the relative Bazel label) and `buffer_interface`. In general, any header included via `#include` in a file belonging to the union of the `hdrs` and `srcs` lists for a Bazel target X should appear directly in the exported `hdrs` list for some target Y listed in the `deps` of X. Continuing the above example, the implementation of `Bar` might take place in `source/common/foo/bar_impl.h`, e.g. ```c++ #pragma once #include "envoy/foo/bar.h" class BarImpl : public Bar { ... ``` and `source/common/foo/bar_impl.cc`: ```c++ #include "common/foo/bar_impl.h" #include "common/buffer/buffer_impl.h" #include "common/foo/bar_internal.h" #include "common/foo/baz_impl.h" ... ``` The corresponding target to be added to `source/common/foo/BUILD` would be: ```python envoy_cc_library( name = "bar_lib", srcs = [ "bar_impl.cc", "bar_internal.h", ], hdrs = ["bar_impl.h"], deps = [ ":baz_lib", "//include/envoy/foo:bar_interface", "//source/common/buffer:buffer_lib", ], ) ``` By convention, Bazel targets for internal implementation libraries are suffixed with `_lib`. 
Similar to the above, a test mock target might be declared for `test/mocks/foo/mocks.h` in `test/mocks/foo/BUILD` with: ```python envoy_cc_mock( name = "foo_mocks", srcs = ["mocks.cc"], hdrs = ["mocks.h"], deps = [ "//include/envoy/foo:bar_interface", ... ], ) ``` Typically, mocks are provided for all interfaces in a directory in a single `mocks.{cc,h}` and corresponding `_mocks` Bazel target. There are some exceptions, such as [test/mocks/upstream/BUILD](../test/mocks/upstream/BUILD), where more granular mock targets are defined. Unit tests for `BarImpl` would be written in `test/common/foo/bar_impl_test.cc` and a target added to `test/common/foo/BUILD`: ```python envoy_cc_test( name = "bar_impl_test", srcs = ["bar_impl_test.cc"], deps = [ "//test/mocks/buffer:buffer_mocks", "//source/common/foo:bar_lib", ... ], ) ``` ## Binary targets New binary targets, for example tools that make use of some Envoy libraries, can be added with the `envoy_cc_binary` rule, e.g. for a new `tools/hello/world.cc` that depends on `bar_lib`, we might have in `tools/hello/BUILD`: ```python envoy_cc_binary( name = "world", srcs = ["world.cc"], deps = [ "//source/common/foo:bar_lib", ], ) ``` ## Filter linking Filters are registered via static initializers at early runtime by modules in [`source/server/config`](../source/server/config). These require the `alwayslink = 1` attribute to be set in the corresponding `envoy_cc_library` target to ensure they are correctly linked. See [`source/server/config/http/BUILD`](../source/server/config/http/BUILD) for examples. ## Tests with environment dependencies Some tests depends on read-only data files. In general, these can be specified by adding a `data = ["some_file.csv", ...],` attribute to the `envoy_cc_test` target, e.g. ```python envoy_cc_test( name = "bar_impl_test", srcs = ["bar_impl_test.cc"], data = ["some_file.csv"], deps = [ "//test/mocks/buffer:buffer_mocks", "//source/common/foo:bar_lib", ... 
], ) ``` A [glob function](https://bazel.build/versions/master/docs/be/functions.html#glob) is available for simple pattern matching. Within a test, the read-only data dependencies can be accessed via the [`TestEnvironment::runfilesPath()`](../test/test_common/environment.h) method. A writable path is provided for test temporary files by [`TestEnvironment::temporaryDirectory()`](../test/test_common/environment.h). Integration tests might rely on JSON files that require paths for writable temporary files and paths for file-based Unix Domain Sockets to be specified in the JSON. Jinja-style `{{ test_tmpdir }}` and `{{ test_udsdir }}` macros can be used as placeholders, with the substituted JSON files made available in [`TestEnvironment::temporaryDirectory()`](../test/test_common/environment.h) by the `envoy_cc_test_with_json` rule, e.g. ```python envoy_cc_test_with_json( name = "bar_integration_test", srcs = ["bar_integration_test.cc"], jsons = ["//test/config/integration:server.json"], deps = [ "//source/server:server_lib", ... ], ) ``` In general, the `setup_cmds` attribute can be used to declare a setup shell script that executes in the [test environment](https://bazel.build/versions/master/docs/test-encyclopedia.html#initial-conditions) prior to the test, see [`bazel/envoy_build_system.bzl`](envoy_build_system.bzl) for further details. ================================================ FILE: bazel/EXTERNAL_DEPS.md ================================================ # Choosing tarballs Where the dependency maintainer provides a tarball, prefer that over the automatically generated Github tarball. Github generated tarball SHA256 values can change when Github change their tar/gzip libraries breaking builds. Maintainer provided tarballs are more stable and the maintainer can provide the SHA256. # Adding external dependencies to Envoy (C++) ## Native Bazel This is the preferred style of adding dependencies that use Bazel for their build process. 1. 
Define a new Bazel repository in [`bazel/repositories.bzl`](repositories.bzl), in the `envoy_dependencies()` function. 2. Reference your new external dependency in some `envoy_cc_library` via the `external_deps` attribute. 3. `bazel test //test/...` ## External CMake (preferred) This is the preferred style of adding dependencies that use CMake for their build system. 1. Define the source Bazel repository in [`bazel/repositories.bzl`](repositories.bzl), in the `envoy_dependencies()` function. 2. Add a `cmake_external` rule to [`bazel/foreign_cc/BUILD`](foreign_cc/BUILD). This will reference the source repository in step 1. 3. Reference your new external dependency in some `envoy_cc_library` via the name bound in step 1 in the `external_deps` attribute. 4. `bazel test //test/...` ## genrule repository This is the newer style of adding dependencies with no upstream Bazel configs. It wraps the dependency's native build tooling in a Bazel-aware shell script, installing to a Bazel-managed prefix. The shell script is executed by Bash, with a few Bazel-specific extensions. See the [Bazel docs for "genrule"](https://docs.bazel.build/versions/master/be/general.html#genrule) for details on Bazel's shell extensions. 1. Add a BUILD file in [`bazel/external/`](external/), using a `genrule` target to build the dependency. Please do not add BUILD logic that replaces the dependency's upstream build tooling. 2. Define a new Bazel repository in [`bazel/repositories.bzl`](repositories.bzl), in the `envoy_dependencies()` function. The repository may use `genrule_repository` from [`bazel/genrule_repository.bzl`](genrule_repository.bzl) to place large genrule shell commands into a separate file. 3. Reference your new external dependency in some `envoy_cc_library` via the name bound in step 2 in the `external_deps` attribute. 4. 
`bazel test //test/...` Dependencies between external libraries can use the standard Bazel dependency resolution logic, using the `$(location)` shell extension to resolve paths to binaries, libraries, headers, etc. # Adding external dependencies to Envoy (Python) Python dependencies should be added via `pip3` and `rules_python`. The process is: 1. Define a `pip3_import()` pointing at your target `requirements.txt` in [`bazel/repositories_extra.bzl`](repositories_extra.bzl) 2. Add a `pip_install()` invocation in [`bazel/dependency_imports.bzl`](dependency_imports.bzl). 3. Add a `requirements(" $ HEAPPROFILE=/tmp/mybin.heapprof bazel-bin/source/exe/envoy-static `CPUPROFILE` or `HEAPPROFILE` sets a location for the profiler output. (See *Methodology*.) There are several other environment variables that can be set to tweak the behavior of gperftools. See https://gperftools.github.io/gperftools/ for more details. ### Analyzing the profile [pprof](https://github.com/google/pprof) can be used to symbolize CPU and heap profiles. For example: $ pprof -text bazel-bin/source/exe/envoy-static /tmp/mybin.cpuprof ## Collecting CPU or heap profile for the full execution of a test target The profiler library is automatically linked into envoy_cc_test targets. Run a test with heap profiling enabled, like so: $ bazel test --test_env=HEAPPROFILE=/tmp/heapprof Run a test with CPU profiling enabled, like so: $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof Note that heap checks and heap profile collection in tests have noticeable performance implications. Use the following command to collect a CPU profile from a test target with heap check and heap profile collection disabled: $ bazel test --test_env=CPUPROFILE=/tmp/cpuprof --test_env=HEAPPROFILE= --test_env=HEAPCHECK= ## Starting and stopping profile programmatically ### Add `tcmalloc_dep` dependency to envoy_cc_library rules It is possible to start/stop the CPU or heap profiler programmatically. 
The [Gperftools CPU Profiler](https://gperftools.github.io/gperftools/cpuprofile.html) is controlled by `ProfilerStart()`/`ProfilerStop()`, and the [Gperftools Heap Profiler](https://gperftools.github.io/gperftools/heapprofile.html) is controlled by `HeapProfilerStart()`, `HeapProfilerStop()` and `HeapProfilerDump()`. These functions are wrapped by Envoy objects defined in [`source/common/profiler/profiler.h`](https://github.com/envoyproxy/envoy/blob/master/source/common/profiler/profiler.h)). To enable profiling programmatically: 1. Add a library dependency on "//source/common/profiler:profiler_lib" to your envoy_cc_library build rule. 2. Use the `startProfiler`/`stopProfiler` methods of `Envoy::Profiler::Cpu` or `Envoy::Profiler::Heap` to collect a profile. Note that `startProfiler` should only be called if no other profile of that type is currently active (e.i. `profilerEnabled()` returns false). Example: ```c++ // includes #include "common/profiler/profiler.h" ... Function(...) { if (!Profiler::Cpu::startProfiler(profile_path)) { // Error handling } ... Do expensive stuff in one or more threads. ... // Stop the profiler and dump output to the `profile_path` specified when profile was started. Profiler::Cpu::stopProfiler(); } ``` ## Memory Profiling in Tests To support memory leaks detection, tests are built with gperftools dependencies enabled by default. ### Enabling Memory Profiling in Tests Use `HeapProfilerStart()`, `HeapProfilerStop()`, and `HeapProfilerDump()` to start, stop, and persist memory dumps, respectively. Please see [above](#adding-tcmalloc_dep-to-envoy) for more details. ### Bazel Configuration By default, bazel executes tests in a sandbox, which will be deleted together with memory dumps after the test run. To preserve memory dumps, bazel can be forced to run tests without sandboxing, by setting the ```TestRunner``` parameter to ```local```: ``` bazel test --strategy=TestRunner=local ... 
``` An alternative is to set ```HEAPPROFILE``` environment variable for the test runner: ``` bazel test --test_env=HEAPPROFILE=/tmp/testprofile ... ``` # Methodology For consistent testing, it makes sense to run Envoy for a constant amount of time across trials: $ timeout bazel-bin/source/exe/envoy Envoy will print to stdout something like: Starting tracking the heap And then a series of stdouts like: Dumping heap profile to (100 MB currently in use) Dumping heap profile to (200 MB currently in use) ... This will generate a series of files; if you statically-linked, these are wherever `HEAPPROFILE` points to. Otherwise, they are in the current directory by default. They'll be named something like `main_common_base.0001.heap`, `main_common_base.0002.heap`, etc. *NB:* There is no reason this needs to be titled `main_common_base`. Whatever flag you supply `HeapProfilerStart` / `HeapProfilerDump` will become the filename. Multiple sections of code could be profiled simultaneously by setting multiple `HeapProfilerStart()` / `HeapProfilerStop()` breakpoints with unique identifiers. # Analyzing with `pprof` [pprof](https://github.com/google/pprof) can read these heap files in a number of ways. Most convenient for first-order inspection might be `pprof -top` or `pprof -text`: $ pprof -text bazel-bin/source/exe/envoy main_common_base* | head -n5 File: envoy Build ID: ... Type: inuse_space Showing nodes accounting for 6402800.62kB, 98.59% of 6494044.58kB total Dropped ... 
nodes (cum <= ...kB) More complex flame/graph charts can be generated and viewed in a browser, which is often more helpful than text-based output: $ pprof -http=localhost:9999 bazel-bin/source/exe/envoy main_common_base* ================================================ FILE: bazel/README.md ================================================ # Building Envoy with Bazel ## Installing Bazelisk as Bazel It is recommended to use [Bazelisk](https://github.com/bazelbuild/bazelisk) installed as `bazel`, to avoid Bazel compatibility issues. On Linux, run the following commands: ``` sudo wget -O /usr/local/bin/bazel https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-linux-amd64 sudo chmod +x /usr/local/bin/bazel ``` On macOS, run the following command: ``` brew install bazelisk ``` On Windows, run the following commands: ``` mkdir %USERPROFILE%\bazel powershell Invoke-WebRequest https://github.com/bazelbuild/bazelisk/releases/latest/download/bazelisk-windows-amd64.exe -OutFile %USERPROFILE%\bazel\bazel.exe set PATH=%PATH%;%USERPROFILE%\bazel ``` If you're building from an revision of Envoy prior to August 2019, which doesn't contains a `.bazelversion` file, run `ci/run_envoy_docker.sh "bazel version"` to find the right version of Bazel and set the version to `USE_BAZEL_VERSION` environment variable to build. ## Production environments To build Envoy with Bazel in a production environment, where the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements) are typically independently sourced, the following steps should be followed: 1. Configure, build and/or install the [Envoy dependencies](https://www.envoyproxy.io/docs/envoy/latest/install/building.html#requirements). 1. `bazel build -c opt //source/exe:envoy-static` from the repository root. ## Quick start Bazel build for developers This section describes how to and what dependencies to install to get started building Envoy with Bazel. 
If you would rather use a pre-build Docker image with required tools installed, skip to [this section](#building-envoy-with-the-ci-docker-image). As a developer convenience, a [WORKSPACE](https://github.com/envoyproxy/envoy/blob/master/WORKSPACE) and [rules for building a recent version](https://github.com/envoyproxy/envoy/blob/master/bazel/repositories.bzl) of the various Envoy dependencies are provided. These are provided as is, they are only suitable for development and testing purposes. The specific versions of the Envoy dependencies used in this build may not be up-to-date with the latest security patches. See [this doc](https://github.com/envoyproxy/envoy/blob/master/bazel/EXTERNAL_DEPS.md#updating-an-external-dependency-version) for how to update or override dependencies. 1. Install external dependencies. ### Ubuntu On Ubuntu, run the following: ``` sudo apt-get install \ libtool \ cmake \ automake \ autoconf \ make \ ninja-build \ curl \ unzip \ virtualenv ``` ### Fedora On Fedora (maybe also other red hat distros), run the following: ``` dnf install cmake libtool libstdc++ libstdc++-static libatomic ninja-build lld patch aspell-en ``` ### Linux On Linux, we recommend using the prebuilt Clang+LLVM package from [LLVM official site](http://releases.llvm.org/download.html). Extract the tar.xz and run the following: ``` bazel/setup_clang.sh ``` This will setup a `clang.bazelrc` file in Envoy source root. If you want to make clang as default, run the following: ``` echo "build --config=clang" >> user.bazelrc ``` Note: Either `libc++` or `libstdc++-7-dev` (or higher) must be installed. These are typically available via a package manager, but may not be available in default repositories depending on OS version. To build against `libc++` build with the `--config=libc++` instead of the `--config=clang` flag. ### macOS On macOS, you'll need to install several dependencies. 
This can be accomplished via [Homebrew](https://brew.sh/): ``` brew install coreutils wget cmake libtool go bazel automake ninja clang-format autoconf aspell ``` _notes_: `coreutils` is used for `realpath`, `gmd5sum` and `gsha256sum` The full version of Xcode (not just Command Line Tools) is also required to build Envoy on macOS. Envoy compiles and passes tests with the version of clang installed by Xcode 11.1: Apple clang version 11.0.0 (clang-1100.0.33.8). In order for bazel to be aware of the tools installed by brew, the PATH variable must be set for bazel builds. This can be accomplished by setting this in your `user.bazelrc` file: ``` build --action_env=PATH="/usr/local/bin:/opt/local/bin:/usr/bin:/bin" ``` Alternatively, you can pass `--action_env` on the command line when running `bazel build`/`bazel test`. Having the binutils keg installed in Brew is known to cause issues due to putting an incompatible version of `ar` on the PATH, so if you run into issues building third party code like luajit consider uninstalling binutils. ### Windows Install bazelisk in the PATH using the `bazel.exe` executable name as described above in the first section. When building Envoy, Bazel creates very long path names. One way to work around these excessive path lengths is to change the output base directory for bazel to a very short root path. The CI pipeline for Windows uses `C:\_eb` as the bazel base path. This and other preferences should be set up by placing the following bazelrc configuration line in a system `%ProgramData%\bazel.bazelrc` file or the individual user's `%USERPROFILE%\.bazelrc` file (rather than including it on every bazel command line): ``` startup --output_base=C:/_eb ``` Bazel also creates file symlinks when building Envoy. It's strongly recommended to enable file symlink support using [Bazel's instructions](https://docs.bazel.build/versions/master/windows.html#enable-symlink-support). 
For other common issues, see the [Using Bazel on Windows](https://docs.bazel.build/versions/master/windows.html) page. [python3](https://www.python.org/downloads/): Specifically, the Windows-native flavor distributed by python.org. The POSIX flavor available via MSYS2, the Windows Store flavor and other distributions will not work. Add a symlink for `python3.exe` pointing to the installed `python.exe` for Envoy scripts and Bazel rules which follow POSIX python conventions. Add `pip.exe` to the PATH and install the `wheel` package. ``` mklink %USERPROFILE%\Python38\python3.exe %USERPROFILE%\Python38\python.exe set PATH=%PATH%;%USERPROFILE%\Python38 set PATH=%PATH%;%USERPROFILE%\Python38\Scripts pip install wheel ``` [Build Tools for Visual Studio 2019](https://visualstudio.microsoft.com/downloads/#build-tools-for-visual-studio-2019): For building with MSVC (the `msvc-cl` config option), you must install at least the VC++ workload. You may alternately install the entire Visual Studio 2019 and use the Build Tools installed in that package. Earlier versions of VC++ Build Tools/Visual Studio are not recommended or supported. If installed in a non-standard filesystem location, be sure to set the `BAZEL_VC` environment variable to the path of the VC++ package to allow Bazel to find your installation of VC++. NOTE: ensure that the `link.exe` that resolves on your PATH is from VC++ Build Tools and not `/usr/bin/link.exe` from MSYS2, which is determined by their relative ordering in your PATH. ``` set BAZEL_VC=%USERPROFILE%\VSBT2019\VC set PATH=%PATH%;%USERPROFILE%\VSBT2019\VC\Tools\MSVC\14.26.28801\bin\Hostx64\x64 ``` Ensure `CMake` and `ninja` binaries are on the PATH. The versions packaged with VC++ Build Tools are sufficient in most cases, but are 32 bit binaries. These flavors will not run in the project's GCP CI remote build environment, so 64 bit builds from the CMake and ninja projects are used instead. 
``` set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin set PATH=%PATH%;%USERPROFILE%\VSBT2019\Common7\IDE\CommonExtensions\Microsoft\CMake\Ninja ``` [MSYS2 shell](https://msys2.github.io/): Install to a path with no spaces, e.g. C:\msys32. Set the `BAZEL_SH` environment variable to the path of the installed MSYS2 `bash.exe` executable. Additionally, setting the `MSYS2_ARG_CONV_EXCL` environment variable to a value of `*` is often advisable to ensure argument parsing in the MSYS2 shell behaves as expected. ``` set PATH=%PATH%;%USERPROFILE%\msys64\usr\bin set BAZEL_SH=%USERPROFILE%\msys64\usr\bin\bash.exe set MSYS2_ARG_CONV_EXCL=* ``` Set the `TMPDIR` environment variable to a path usable as a temporary directory (e.g. `C:\Windows\TEMP`), and create a directory symlink `C:\c` to `C:\`, so that the MSYS2 path `/c/Windows/TEMP` is equivalent to the Windows path `C:\Windows\TEMP`: ``` set TMPDIR=C:\Windows\TEMP mklink /d C:\c C:\ ``` The TMPDIR path and MSYS2 `mktemp` command are used frequently by the `rules_foreign_cc` component of Bazel as well as Envoy's test scripts, causing problems if not set to a path accessible to both Windows and msys commands. [Note the `ci/windows_ci_steps.sh` script which builds envoy and run tests in CI creates this symlink automatically.] In the MSYS2 shell, install additional packages via pacman: ``` pacman -S diffutils patch unzip zip ``` [Git](https://git-scm.com/downloads): This version from the Git project, or the version distributed using pacman under MSYS2 will both work, ensure one is on the PATH:. ``` set PATH=%PATH%;%USERPROFILE%\Git\bin ``` Lastly, persist environment variable changes. NOTE: The paths in this document are given as examples, make sure to verify you are using the correct paths for your environment. Also note that these examples assume using a `cmd.exe` shell to set environment variables etc., be sure to do the equivalent if using a different shell. 
``` setx PATH "%PATH%" setx BAZEL_SH "%BAZEL_SH%" setx MSYS2_ARG_CONV_EXCL "%MSYS2_ARG_CONV_EXCL%" setx BAZEL_VC "%BAZEL_VC%" setx TMPDIR "%TMPDIR%" ``` 1. Install Golang on your machine. This is required as part of building [BoringSSL](https://boringssl.googlesource.com/boringssl/+/HEAD/BUILDING.md) and also for [Buildifer](https://github.com/bazelbuild/buildtools) which is used for formatting bazel BUILD files. 1. `go get -u github.com/bazelbuild/buildtools/buildifier` to install buildifier. You may need to set `BUILDIFIER_BIN` to `$GOPATH/bin/buildifier` in your shell for buildifier to work. 1. `go get -u github.com/bazelbuild/buildtools/buildozer` to install buildozer. You may need to set `BUILDOZER_BIN` to `$GOPATH/bin/buildozer` in your shell for buildozer to work. 1. `bazel build //source/exe:envoy-static` from the Envoy source directory. Add `-c opt` for an optimized release build or `-c dbg` for an unoptimized, fully instrumented debugging build. ## Building Envoy with the CI Docker image Envoy can also be built with the Docker image used for CI, by installing Docker and executing the following. On Linux, run: ``` ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' ``` From a Windows host with Docker installed, the Windows containers feature enabled, and bash (installed via MSYS2 or Git bash), run: ``` ./ci/run_envoy_docker.sh './ci/windows_ci_steps.sh' ``` See also the [documentation](https://github.com/envoyproxy/envoy/tree/master/ci) for developer use of the CI Docker image. ## Building Envoy with Remote Execution Envoy can also be built with Bazel [Remote Execution](https://docs.bazel.build/versions/master/remote-execution.html), part of the CI is running with the hosted [GCP RBE](https://blog.bazel.build/2018/10/05/remote-build-execution.html) service. To build Envoy with a remote build services, run Bazel with your remote build service flags and with `--config=remote-clang`. 
For example the following command runs build with the GCP RBE service used in CI: ``` bazel build //source/exe:envoy-static --config=remote-clang \ --remote_cache=grpcs://remotebuildexecution.googleapis.com \ --remote_executor=grpcs://remotebuildexecution.googleapis.com \ --remote_instance_name=projects/envoy-ci/instances/default_instance ``` Change the value of `--remote_cache`, `--remote_executor` and `--remote_instance_name` for your remote build services. Tests can be run in remote execution too. Note: Currently the test run configuration in `.bazelrc` doesn't download test binaries and test logs, to override the behavior set [`--experimental_remote_download_outputs`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_remote_download_outputs) accordingly. ## Building Envoy with Docker sandbox Building Envoy with Docker sandbox uses the same Docker image used in CI with fixed C++ toolchain configuration. It produces more consistent output which is not depending on your local C++ toolchain. It can also help debugging issues with RBE. To build Envoy with Docker sandbox: ``` bazel build //source/exe:envoy-static --config=docker-clang ``` Tests can be run in docker sandbox too. Note that the network environment, such as IPv6, may be different in the docker sandbox so you may want set different options. See below to configure test IP versions. ## Linking against libc++ on Linux To link Envoy against libc++, follow the [quick start](#quick-start-bazel-build-for-developers) to setup Clang+LLVM and run: ``` bazel build --config=libc++ //source/exe:envoy-static ``` Or use our configuration with Remote Execution or Docker sandbox, pass `--config=remote-clang-libc++` or `--config=docker-clang-libc++` respectively. If you want to make libc++ as default, add a line `build --config=libc++` to the `user.bazelrc` file in Envoy source root. 
## Using a compiler toolchain in a non-standard location By setting the `CC` and `LD_LIBRARY_PATH` in the environment that Bazel executes from as appropriate, an arbitrary compiler toolchain and standard library location can be specified. One slight caveat is that (at the time of writing), Bazel expects the binutils in `$(dirname $CC)` to be unprefixed, e.g. `as` instead of `x86_64-linux-gnu-as`. Note: this configuration currently doesn't work with Remote Execution or Docker sandbox, you have to generate a custom toolchains configuration for them. See [bazelbuild/bazel-toolchains](https://github.com/bazelbuild/bazel-toolchains) for more details. ## Supported compiler versions We now require Clang >= 5.0 due to known issues with std::string thread safety and C++14 support. GCC >= 7 is also known to work. Currently the CI is running with Clang 10. ## Clang STL debug symbols By default Clang drops some debug symbols that are required for pretty printing to work correctly. More information can be found [here](https://bugs.llvm.org/show_bug.cgi?id=24202). The easy solution is to set ```--copt=-fno-limit-debug-info``` on the CLI or in your .bazelrc file. ## Removing debug info If you don't want your debug or release binaries to contain debug info to reduce binary size, pass `--define=no_debug_info=1` when building. This is primarily useful when building envoy as a static library. When building a linked envoy binary you can build the implicit `.stripped` target from [`cc_binary`](https://docs.bazel.build/versions/master/be/c-cpp.html#cc_binary) or pass [`--strip=always`](https://docs.bazel.build/versions/master/command-line-reference.html#flag--strip) instead. # Testing Envoy with Bazel All the Envoy tests can be built and run with: ``` bazel test //test/... ``` An individual test target can be run with a more specific Bazel [label](https://bazel.build/versions/master/docs/build-ref.html#Labels), e.g. 
to build and run only the units tests in [test/common/http/async_client_impl_test.cc](https://github.com/envoyproxy/envoy/blob/master/test/common/http/async_client_impl_test.cc): ``` bazel test //test/common/http:async_client_impl_test ``` To observe more verbose test output: ``` bazel test --test_output=streamed //test/common/http:async_client_impl_test ``` It's also possible to pass into an Envoy test additional command-line args via `--test_arg`. For example, for extremely verbose test debugging: ``` bazel test --test_output=streamed //test/common/http:async_client_impl_test --test_arg="-l trace" ``` By default, testing exercises both IPv4 and IPv6 address connections. In IPv4 or IPv6 only environments, set the environment variable ENVOY_IP_TEST_VERSIONS to "v4only" or "v6only", respectively. ``` bazel test //test/... --test_env=ENVOY_IP_TEST_VERSIONS=v4only bazel test //test/... --test_env=ENVOY_IP_TEST_VERSIONS=v6only ``` By default, tests are run with the [gperftools](https://github.com/gperftools/gperftools) heap checker enabled in "normal" mode to detect leaks. For other mode options, see the gperftools heap checker [documentation](https://gperftools.github.io/gperftools/heap_checker.html). To disable the heap checker or change the mode, set the HEAPCHECK environment variable: ``` # Disables the heap checker bazel test //test/... --test_env=HEAPCHECK= # Changes the heap checker to "minimal" mode bazel test //test/... --test_env=HEAPCHECK=minimal ``` If you see a leak detected, by default the reported offsets will require `addr2line` interpretation. You can run under `--config=clang-asan` to have this automatically applied. Bazel will by default cache successful test results. To force it to rerun tests: ``` bazel test //test/common/http:async_client_impl_test --cache_test_results=no ``` Bazel will by default run all tests inside a sandbox, which disallows access to the local filesystem. 
If you need to break out of the sandbox (for example to run under a local script or tool with [`--run_under`](https://docs.bazel.build/versions/master/user-manual.html#flag--run_under)), you can run the test with `--strategy=TestRunner=local`, e.g.: ``` bazel test //test/common/http:async_client_impl_test --strategy=TestRunner=local --run_under=/some/path/foobar.sh ``` # Stack trace symbol resolution Envoy can produce backtraces on demand and from assertions and other fatal actions like segfaults. Where supported, stack traces will contain resolved symbols, though not include line numbers. On systems where absl::Symbolization is not supported, the stack traces written in the log or to stderr contain addresses rather than resolved symbols. If the symbols were resolved, the address is also included at the end of the line. The `tools/stack_decode.py` script exists to process the output and do additional symbol resolution including file names and line numbers. It requires the `addr2line` program be installed and in your path. Any log lines not relevant to the backtrace capability are passed through the script unchanged (it acts like a filter). File and line information is appended to the stack trace lines. The script runs in one of two modes. To process log input from stdin, pass `-s` as the first argument, followed by the executable file path. You can postprocess a log or pipe the output of an Envoy process. If you do not specify the `-s` argument it runs the arguments as a child process. This enables you to run a test with backtrace post processing. Bazel sandboxing must be disabled by specifying local execution. 
Example command line with `run_under`: ``` bazel test -c dbg //test/server:backtrace_test --run_under=`pwd`/tools/stack_decode.py --strategy=TestRunner=local --cache_test_results=no --test_output=all ``` Example using input on stdin: ``` bazel test -c dbg //test/server:backtrace_test --cache_test_results=no --test_output=streamed |& tools/stack_decode.py -s bazel-bin/test/server/backtrace_test ``` You will need to use either a `dbg` build type or the `opt` build type to get file and line symbol information in the binaries. By default main.cc will install signal handlers to print backtraces at the location where a fatal signal occurred. The signal handler will re-raise the fatal signal with the default handler so a core file will still be dumped after the stack trace is logged. To inhibit this behavior use `--define=signal_trace=disabled` on the Bazel command line. No signal handlers will be installed. # Running a single Bazel test under GDB ``` bazel build -c dbg //test/common/http:async_client_impl_test bazel build -c dbg //test/common/http:async_client_impl_test.dwp gdb bazel-bin/test/common/http/async_client_impl_test ``` We need to use `-c dbg` Bazel option to generate debugging symbols and without that GDB will not be very useful. The debugging symbols are stored as separate debugging information files (`.dwo` files) and we can build a DWARF package file with `.dwp ` target. The `.dwp` file need to be presented in the same folder with the binary for a full debugging experience. # Running Bazel tests requiring privileges Some tests may require privileges (e.g. CAP_NET_ADMIN) in order to execute. One option is to run them with elevated privileges, e.g. `sudo test`. However, that may not always be possible, particularly if the test needs to run in a CI pipeline. `tools/bazel-test-docker.sh` may be used in such situations to run the tests in a privileged docker container. 
The script works by wrapping the test execution in the current repository's circle ci build container, then executing it either locally or on a remote docker container. In both cases, the container runs with the `--privileged` flag, allowing it to execute operations which would otherwise be restricted. The command line format is: `tools/bazel-test-docker.sh [optional-flags-to-bazel]` The script uses two optional environment variables to control its behaviour: * `RUN_REMOTE=`: chooses whether to run on a remote docker server. * `LOCAL_MOUNT=`: copy/mount local libraries onto the docker container. Use `RUN_REMOTE=yes` when you don't want to run against your local docker instance. Note that you will need to override a few environment variables to set up the remote docker. The list of variables can be found in the [Documentation](https://docs.docker.com/engine/reference/commandline/cli/). Use `LOCAL_MOUNT=yes` when you are not building with the Envoy build container. This will ensure that the libraries against which the tests dynamically link will be available and of the correct version. ## Examples Running the http integration test in a privileged container: ```bash tools/bazel-test-docker.sh //test/integration:integration_test --jobs=4 -c dbg ``` Running the http integration test compiled locally against a privileged remote container: ```bash setup_remote_docker_variables RUN_REMOTE=yes MOUNT_LOCAL=yes tools/bazel-test-docker.sh //test/integration:integration_test \ --jobs=4 -c dbg ``` # Additional Envoy build and test options In general, there are 3 [compilation modes](https://docs.bazel.build/versions/master/user-manual.html#flag--compilation_mode) that Bazel supports: * `fastbuild`: `-O0`, aimed at developer speed (default). * `opt`: `-O2 -DNDEBUG -ggdb3 -gsplit-dwarf`, for production builds and performance benchmarking. * `dbg`: `-O0 -ggdb3 -gsplit-dwarf`, no optimization and debug symbols. You can use the `-c ` flag to control this, e.g. 
``` bazel build -c opt //source/exe:envoy-static ``` To override the compilation mode and optimize the build for binary size, you can use the `sizeopt` configuration: ``` bazel build //source/exe:envoy-static --config=sizeopt ``` ## Sanitizers To build and run tests with the gcc compiler's [address sanitizer (ASAN)](https://github.com/google/sanitizers/wiki/AddressSanitizer) and [undefined behavior (UBSAN)](https://developers.redhat.com/blog/2014/10/16/gcc-undefined-behavior-sanitizer-ubsan) sanitizer enabled: ``` bazel test -c dbg --config=asan //test/... ``` The ASAN failure stack traces include line numbers as a result of running ASAN with a `dbg` build above. If the stack trace is not symbolized, try setting the ASAN_SYMBOLIZER_PATH environment variable to point to the llvm-symbolizer binary (or make sure the llvm-symbolizer is in your $PATH). If you have clang-5.0 or newer, additional checks are provided with: ``` bazel test -c dbg --config=clang-asan //test/... ``` [Thread sanitizer (TSAN)](https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual) tests rely on a TSAN-instrumented version of libc++ and can be run under the docker sandbox: ``` bazel test -c dbg --config=docker-tsan //test/... ``` Alternatively, you can build a local copy of TSAN-instrumented libc++. Follow the [quick start](#quick-start-bazel-build-for-developers) instruction to setup Clang+LLVM environment. Download LLVM sources from the [LLVM official site](https://github.com/llvm/llvm-project) ``` curl -sSfL "https://github.com/llvm/llvm-project/archive/llvmorg-10.0.0.tar.gz" | tar zx ``` Configure and build a TSAN-instrumented libc++. Please note that `LLVM_USE_SANITIZER=Thread` preprocessor definition is used to enable TSAN instrumentation, and `CMAKE_INSTALL_PREFIX="/opt/libcxx_tsan"` defines the installation directory path. 
``` mkdir tsan pushd tsan cmake -GNinja -DLLVM_ENABLE_PROJECTS="libcxxabi;libcxx" -DLLVM_USE_LINKER=lld -DLLVM_USE_SANITIZER=Thread -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_INSTALL_PREFIX="/opt/libcxx_tsan" "../llvm-project-llvmorg-10.0.0/llvm" ninja install-cxx install-cxxabi rm -rf /opt/libcxx_tsan/include ``` Generate local_tsan.bazelrc containing bazel configuration for tsan tests: ``` bazel/setup_local_tsan.sh ``` To execute TSAN tests using the local instrumented libc++ library pass `--config=local-tsan` to bazel: ``` bazel test --config=local-tsan //test/... ``` For [memory sanitizer (MSAN)](https://github.com/google/sanitizers/wiki/MemorySanitizer) testing, it has to be run under the docker sandbox which comes with MSAN instrumented libc++: ``` bazel test -c dbg --config=docker-msan //test/... ``` To run the sanitizers on OS X, prefix `macos-` to the config option, e.g.: ``` bazel test -c dbg --config=macos-asan //test/... ``` ## Log Verbosity Log verbosity is controlled at runtime in all builds. To obtain `nghttp2` traces, you can set `ENVOY_NGHTTP2_TRACE` in the environment for enhanced logging at `-l trace`. For example, in tests: ``` bazel test //test/integration:protocol_integration_test --test_output=streamed \ --test_arg="-l trace" --test_env="ENVOY_NGHTTP2_TRACE=" ``` ## Disabling optional features The following optional features can be disabled on the Bazel build command-line: * Hot restart with `--define hot_restart=disabled` * Google C++ gRPC client with `--define google_grpc=disabled` * Backtracing on signals with `--define signal_trace=disabled` * Active stream state dump on signals with `--define signal_trace=disabled` or `--define disable_object_dump_on_signal_trace=disabled` * tcmalloc with `--define tcmalloc=disabled`. Also you can choose Gperftools' implementation of tcmalloc with `--define tcmalloc=gperftools` which is the default for non-x86 builds. 
* deprecated features with `--define deprecated_features=disabled` ## Enabling optional features The following optional features can be enabled on the Bazel build command-line: * Exported symbols during linking with `--define exported_symbols=enabled`. This is useful in cases where you have a lua script that loads shared object libraries, such as those installed via luarocks. * Perf annotation with `--define perf_annotation=enabled` (see source/common/common/perf_annotation.h for details). * BoringSSL can be built in a FIPS-compliant mode with `--define boringssl=fips` (see [FIPS 140-2](https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/security/ssl#fips-140-2) for details). * ASSERT() can be configured to log failures and increment a stat counter in a release build with `--define log_debug_assert_in_release=enabled`. The default behavior is to compile debug assertions out of release builds so that the condition is not evaluated. This option has no effect in debug builds. * memory-debugging (scribbling over memory after allocation and before freeing) with `--define tcmalloc=debug`. Note this option cannot be used with FIPS-compliant mode BoringSSL and tcmalloc is built from the sources of Gperftools. * Default [path normalization](https://github.com/envoyproxy/envoy/issues/6435) with `--define path_normalization_by_default=true`. Note this still could be disable by explicit xDS config. * Manual stamping via VersionInfo with `--define manual_stamp=manual_stamp`. This is needed if the `version_info_lib` is compiled via a non-binary bazel rules, e.g `envoy_cc_library`. Otherwise, the linker will fail to resolve symbols that are included via the `linktamp` rule, which is only available to binary targets. This is being tracked as a feature in: https://github.com/envoyproxy/envoy/issues/6859. * Process logging for Android applications can be enabled with `--define logger=android`. 
* Excluding assertions for known issues with `--define disable_known_issue_asserts=true`. A KNOWN_ISSUE_ASSERT is an assertion that should pass (like all assertions), but sometimes fails for some as-yet unidentified or unresolved reason. Because it is known to potentially fail, it can be compiled out even when DEBUG is true, when this flag is set. This allows Envoy to be run in production with assertions generally enabled, without crashing for known issues. KNOWN_ISSUE_ASSERT should only be used for newly-discovered issues that represent benign violations of expectations. * Envoy can be linked to [`zlib-ng`](https://github.com/zlib-ng/zlib-ng) instead of [`zlib`](https://zlib.net) with `--define zlib=ng`. ## Disabling extensions Envoy uses a modular build which allows extensions to be removed if they are not needed or desired. Extensions that can be removed are contained in [extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). Use the following procedure to customize the extensions for your build: * The Envoy build assumes that a Bazel repository named `@envoy_build_config` exists which contains the file `@envoy_build_config//:extensions_build_config.bzl`. In the default build, a synthetic repository is created containing [extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). Thus, the default build has all extensions. * Start by creating a new Bazel workspace somewhere in the filesystem that your build can access. This workspace should contain: * Empty WORKSPACE file. * Empty BUILD file. * A copy of [extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). * Comment out any extensions that you don't want to build in your file copy. To have your local build use your overridden configuration repository there are two options: 1. Use the [`--override_repository`](https://docs.bazel.build/versions/master/command-line-reference.html) CLI option to override the `@envoy_build_config` repo. 2. 
Use the following snippet in your WORKSPACE before you load the Envoy repository. E.g.,

```
workspace(name = "envoy")

local_repository(
    name = "envoy_build_config",
    # Relative paths are also supported.
    path = "/somewhere/on/filesystem/envoy_build_config",
)

local_repository(
    name = "envoy",
    # Relative paths are also supported.
    path = "/somewhere/on/filesystem/envoy",
)

...
```

## Extra extensions

If you are building your own Envoy extensions or custom Envoy builds and encounter visibility
problems, you may need to adjust the default visibility rules to be public, as documented in
[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl). See the
instructions above about how to create your own custom version of
[extensions_build_config.bzl](../source/extensions/extensions_build_config.bzl).

# Release builds

Release builds should be built in `opt` mode, processed with `strip` and have a
`.note.gnu.build-id` section with the Git SHA1 at which the build took place. They should also
ignore any local `.bazelrc` for reproducibility. This can be achieved with:

```
bazel --bazelrc=/dev/null build -c opt //source/exe:envoy-static.stripped
```

One caveat to note is that the Git SHA1 is truncated to 16 bytes today as a result of the
workaround in place for https://github.com/bazelbuild/bazel/issues/2805.

# Coverage builds

To generate coverage results, make sure you are using a clang toolchain and have `llvm-cov` and
`llvm-profdata` in your `PATH`. Then run:

```
test/run_envoy_bazel_coverage.sh
```

The summary results are printed to the standard output and the full coverage report is available
in `generated/coverage/coverage.html`.
To generate coverage results for fuzz targets, use the `FUZZ_COVERAGE` environment variable, e.g.:

```
FUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh
```

This generates a coverage report for fuzz targets after running the target for one minute against
the fuzzing engine libfuzzer, using its corpus as initial seed inputs. The full coverage report
will be available in `generated/fuzz_coverage/coverage.html`.

Coverage for every PR is available in Circle in the "artifacts" tab of the coverage job. You will
need to navigate down and open "coverage.html" but then you can navigate per normal. NOTE: We have
seen some issues with seeing the artifacts tab. If you can't see it, log out of Circle, and then
log back in and it should start working.

The latest coverage report for master is available
[here](https://storage.googleapis.com/envoy-postsubmit/master/coverage/index.html).

The latest fuzz coverage report for master is available
[here](https://storage.googleapis.com/envoy-postsubmit/master/fuzz_coverage/index.html).

It's also possible to specialize the coverage build to a specified test or test dir. This is
useful when doing things like exploring the coverage of a fuzzer over its corpus. This can be done
by passing coverage targets as the command-line arguments and using the `VALIDATE_COVERAGE`
environment variable, e.g. for a fuzz test:

```
FUZZ_COVERAGE=true VALIDATE_COVERAGE=false test/run_envoy_bazel_coverage.sh //test/common/common:base64_fuzz_test
```

# Cleaning the build and test artifacts

`bazel clean` will nuke all the build/test artifacts from the Bazel cache for Envoy proper. To
remove the artifacts for the external dependencies run `bazel clean --expunge`.

If something goes really wrong and none of the above work to resolve a stale build issue, you can
always remove your Bazel cache completely. It is likely located in `~/.cache/bazel`.
# Adding or maintaining Envoy build rules

See the [developer guide for writing Envoy Bazel rules](DEVELOPER.md).

# Bazel performance on (virtual) machines with low resources

If the (virtual) machine that is performing the build is low on memory or CPU resources, you can
override Bazel's default job parallelism determination with `--jobs=N` to restrict the build to at
most `N` simultaneous jobs, e.g.:

```
bazel build --jobs=2 //source/exe:envoy-static
```

# Debugging the Bazel build

When trying to understand what Bazel is doing, the `-s` and `--explain` options are useful. To
have Bazel provide verbose output on which commands it is executing:

```
bazel build -s //source/exe:envoy-static
```

To have Bazel emit to a text file the rationale for rebuilding a target:

```
bazel build --explain=file.txt //source/exe:envoy-static
```

To get more verbose explanations:

```
bazel build --explain=file.txt --verbose_explanations //source/exe:envoy-static
```

# Resolving paths in bazel build output

Sometimes it's useful to see real system paths in bazel error message output (vs. symbolic links).
`tools/path_fix.sh` is provided to help with this. See the comments in that file.

# Compilation database

Run `tools/gen_compilation_database.py` to generate a
[JSON Compilation Database](https://clang.llvm.org/docs/JSONCompilationDatabase.html). This could
be used with any tools (e.g. clang-tidy) compatible with the format. It is recommended to run this
script with `TEST_TMPDIR` set, so the Bazel artifacts don't get cleaned up in the next
`bazel build` or `bazel test`.

The compilation database could also be used to set up editors with cross reference, code
completion. For example, you can use
[You Complete Me](https://valloric.github.io/YouCompleteMe/) or
[clangd](https://clangd.llvm.org/) with supported editors.
For example, use the following command to prepare a compilation database:

```
TEST_TMPDIR=/tmp tools/gen_compilation_database.py
```

# Running clang-format without docker

The easiest way to run the clang-format check/fix commands is to run them via docker, which helps
ensure the right toolchain is set up. However you may prefer to run clang-format scripts on your
workstation directly:

 * It's possible there is a speed advantage
 * Docker itself can sometimes go awry and you then have to deal with that
 * Type-ahead doesn't always work when running a command through docker

To run the tools directly, you must install the correct version of clang. This may change over
time, check the version of clang in the docker image. You must also have 'buildifier' installed
from the bazel distribution.

Edit the paths shown here to reflect the installation locations on your system:

```shell
export CLANG_FORMAT="$HOME/ext/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang-format"
export BUILDIFIER_BIN="/usr/bin/buildifier"
```

Once this is set up, you can run clang-format without docker:

```shell
./tools/code_format/check_format.py check
./tools/spelling/check_spelling.sh check
./tools/code_format/check_format.py fix
./tools/spelling/check_spelling.sh fix
```

# Advanced caching setup

Setting up an HTTP cache for Bazel output helps optimize Bazel performance and resource usage when
using multiple compilation modes or multiple trees.

## Setup local cache

You may use any [Remote Caching](https://docs.bazel.build/versions/master/remote-caching.html)
backend as an alternative to this.

This requires Go 1.11+, follow the [instructions](https://golang.org/doc/install#install) to
install if you don't have one.
To start the cache, run the following from the root of the Envoy repository (or anywhere else that the Go toolchain can find the necessary dependencies): ``` go run github.com/buchgr/bazel-remote --dir ${HOME}/bazel_cache --host 127.0.0.1 --port 28080 --max_size 64 ``` See [Bazel remote cache](https://github.com/buchgr/bazel-remote) for more information on the parameters. The command above will setup a maximum 64 GiB cache at `~/bazel_cache` on port 28080. You might want to setup a larger cache if you run ASAN builds. NOTE: Using docker to run remote cache server described in remote cache docs will likely have slower cache performance on macOS due to slow disk performance on Docker for Mac. Adding the following parameter to Bazel everytime or persist them in `.bazelrc`. ``` --remote_http_cache=http://127.0.0.1:28080/ ``` ================================================ FILE: bazel/antlr.patch ================================================ diff --git a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp index c6cceda13..e86533759 100755 --- a/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp +++ b/runtime/Cpp/runtime/src/atn/ATNDeserializer.cpp @@ -104,7 +104,7 @@ void deserializeSets( } -ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions::getDefaultOptions()) { +ATNDeserializer::ATNDeserializer(): ATNDeserializer(ATNDeserializationOptions()) { } ATNDeserializer::ATNDeserializer(const ATNDeserializationOptions& dso): deserializationOptions(dso) { diff --git a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp index 827c3d59f..62914cf55 100755 --- a/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp +++ b/runtime/Cpp/runtime/src/atn/LexerATNSimulator.cpp @@ -69,7 +69,7 @@ void LexerATNSimulator::copyState(LexerATNSimulator *simulator) { } size_t LexerATNSimulator::match(CharStream *input, size_t mode) { - match_calls++; + // match_calls++; _mode 
= mode; ssize_t mark = input->mark(); ================================================ FILE: bazel/api_binding.bzl ================================================ def _default_envoy_api_impl(ctx): ctx.file("WORKSPACE", "") api_dirs = [ "BUILD", "bazel", "envoy", "examples", "test", "tools", "versioning", ] for d in api_dirs: ctx.symlink(ctx.path(ctx.attr.envoy_root).dirname.get_child(ctx.attr.reldir).get_child(d), d) _default_envoy_api = repository_rule( implementation = _default_envoy_api_impl, attrs = { "envoy_root": attr.label(default = "@envoy//:BUILD"), "reldir": attr.string(), }, ) def envoy_api_binding(): # Treat the data plane API as an external repo, this simplifies exporting # the API to https://github.com/envoyproxy/data-plane-api. This is the # shadow API for Envoy internal use, see #9479. if "envoy_api" not in native.existing_rules().keys(): _default_envoy_api(name = "envoy_api", reldir = "generated_api_shadow") # We also provide the non-shadowed API for developer use (see #9479). 
if "envoy_api_raw" not in native.existing_rules().keys(): _default_envoy_api(name = "envoy_api_canonical", reldir = "api") # TODO(https://github.com/envoyproxy/envoy/issues/7719) need to remove both bindings and use canonical rules native.bind( name = "api_httpbody_protos", actual = "@com_google_googleapis//google/api:httpbody_cc_proto", ) native.bind( name = "http_api_protos", actual = "@com_google_googleapis//google/api:annotations_cc_proto", ) ================================================ FILE: bazel/api_repositories.bzl ================================================ load("@envoy_api//bazel:repositories.bzl", "api_dependencies") def envoy_api_dependencies(): api_dependencies() ================================================ FILE: bazel/boringssl_static.patch ================================================ diff --git a/BUILD b/BUILD index d7c731bf6..315cdeca0 100644 --- a/BUILD +++ b/BUILD @@ -88,6 +88,7 @@ boringssl_copts = select({ ":windows_x86_64": [ "-DWIN32_LEAN_AND_MEAN", "-DOPENSSL_NO_ASM", + "-DBORINGSSL_IMPLEMENTATION", ], "//conditions:default": ["-DOPENSSL_NO_ASM"], }) @@ -141,6 +142,7 @@ cc_library( ":windows_x86_64": ["-defaultlib:advapi32.lib"], "//conditions:default": ["-lpthread"], }), + linkstatic = True, visibility = ["//visibility:public"], ) @@ -150,6 +152,7 @@ cc_library( hdrs = ssl_headers, copts = boringssl_copts_cxx, includes = ["src/include"], + linkstatic = True, visibility = ["//visibility:public"], deps = [ ":crypto", ================================================ FILE: bazel/coverage/BUILD ================================================ licenses(["notice"]) # Apache 2 # TODO(lizan): Add test for this and upstream to upstream Bazel. 
filegroup( name = "coverage_support", srcs = ["collect_cc_coverage.sh"], ) exports_files(["fuzz_coverage_wrapper.sh"]) ================================================ FILE: bazel/coverage/collect_cc_coverage.sh ================================================ #!/bin/bash -x # # This is a fork of https://github.com/bazelbuild/bazel/blob/3.1.0/tools/test/collect_cc_coverage.sh # to cover most of use cases in Envoy. # TODO(lizan): Move this to upstream Bazel # # Copyright 2016 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This script collects code coverage data for C++ sources, after the tests # were executed. # # Bazel C++ code coverage collection support is poor and limited. There is # an ongoing effort to improve this (tracking issue #1118). # # Bazel uses the lcov tool for gathering coverage data. There is also # an experimental support for clang llvm coverage, which uses the .profraw # data files to compute the coverage report. # # This script assumes the following environment variables are set: # - COVERAGE_DIR Directory containing metadata files needed for # coverage collection (e.g. gcda files, profraw). # - COVERAGE_MANIFEST Location of the instrumented file manifest. # - COVERAGE_GCOV_PATH Location of gcov. This is set by the TestRunner. # - COVERAGE_GCOV_OPTIONS Additional options to pass to gcov. # - ROOT Location from where the code coverage collection # was invoked. 
# # The script looks in $COVERAGE_DIR for the C++ metadata coverage files (either # gcda or profraw) and uses either lcov or gcov to get the coverage data. # The coverage data is placed in $COVERAGE_OUTPUT_FILE. read -ra COVERAGE_GCOV_OPTIONS <<< "${COVERAGE_GCOV_OPTIONS:-}" # Checks if clang llvm coverage should be used instead of lcov. function uses_llvm() { if stat "${COVERAGE_DIR}"/*.profraw >/dev/null 2>&1; then return 0 fi return 1 } # Returns 0 if gcov must be used, 1 otherwise. function uses_gcov() { [[ "$GCOV_COVERAGE" -eq "1" ]] && return 0 return 1 } function init_gcov() { # Symlink the gcov tool such with a link called gcov. Clang comes with a tool # called llvm-cov, which behaves like gcov if symlinked in this way (otherwise # we would need to invoke it with "llvm-cov gcov"). # For more details see https://llvm.org/docs/CommandGuide/llvm-cov.html. GCOV="${COVERAGE_DIR}/gcov" ln -s "${COVERAGE_GCOV_PATH}" "${GCOV}" } # Computes code coverage data using the clang generated metadata found under # $COVERAGE_DIR. # Writes the collected coverage into the given output file. function llvm_coverage() { local output_file="${1}" object_file object_files object_param=() shift export LLVM_PROFILE_FILE="${COVERAGE_DIR}/%h-%p-%m.profraw" "${COVERAGE_GCOV_PATH}" merge -output "${output_file}.data" \ "${COVERAGE_DIR}"/*.profraw object_files="$(find -L "${RUNFILES_DIR}" -type f -exec file -L {} \; \ | grep ELF | grep -v "LSB core" | sed 's,:.*,,')" for object_file in ${object_files}; do object_param+=(-object "${object_file}") done llvm-cov export -instr-profile "${output_file}.data" -format=lcov \ -ignore-filename-regex='.*external/.+' \ -ignore-filename-regex='/tmp/.+' \ "${object_param[@]}" | sed 's#/proc/self/cwd/##' > "${output_file}" } # Generates a code coverage report in gcov intermediate text format by invoking # gcov and using the profile data (.gcda) and notes (.gcno) files. # # The profile data files are expected to be found under $COVERAGE_DIR. 
# The notes file are expected to be found under $ROOT. # # - output_file The location of the file where the generated code coverage # report is written. function gcov_coverage() { local gcda gcno_path line output_file="${1}" shift # Copy .gcno files next to their corresponding .gcda files in $COVERAGE_DIR # because gcov expects them to be in the same directory. while read -r line; do if [[ ${line: -4} == "gcno" ]]; then gcno_path=${line} gcda="${COVERAGE_DIR}/$(dirname "${gcno_path}")/$(basename "${gcno_path}" .gcno).gcda" # If the gcda file was not found we skip generating coverage from the gcno # file. if [[ -f "$gcda" ]]; then # gcov expects both gcno and gcda files to be in the same directory. # We overcome this by copying the gcno to $COVERAGE_DIR where the gcda # files are expected to be. if [ ! -f "${COVERAGE_DIR}/${gcno_path}" ]; then mkdir -p "${COVERAGE_DIR}/$(dirname "${gcno_path}")" cp "$ROOT/${gcno_path}" "${COVERAGE_DIR}/${gcno_path}" fi # Invoke gcov to generate a code coverage report with the flags: # -i Output gcov file in an intermediate text format. # The output is a single .gcov file per .gcda file. # No source code is required. # -o directory The directory containing the .gcno and # .gcda data files. # "${gcda"} The input file name. gcov is looking for data files # named after the input filename without its extension. # gcov produces files called .gcov in the current # directory. These contain the coverage information of the source file # they correspond to. One .gcov file is produced for each source # (or header) file containing code which was compiled to produce the # .gcda files. # Don't generate branch coverage (-b) because of a gcov issue that # segfaults when both -i and -b are used (see # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84879). "${GCOV}" -i "${COVERAGE_GCOV_OPTIONS[@]}" -o "$(dirname "${gcda}")" "${gcda}" # Append all .gcov files in the current directory to the output file. 
cat ./*.gcov >> "$output_file" # Delete the .gcov files. rm ./*.gcov fi fi done < "${COVERAGE_MANIFEST}" } function main() { init_gcov # If llvm code coverage is used, we output the raw code coverage report in # the $COVERAGE_OUTPUT_FILE. This report will not be converted to any other # format by LcovMerger. # TODO(#5881): Convert profdata reports to lcov. if uses_llvm; then BAZEL_CC_COVERAGE_TOOL="PROFDATA" fi # When using either gcov or lcov, have an output file specific to the test # and format used. For lcov we generate a ".dat" output file and for gcov # a ".gcov" output file. It is important that these files are generated under # COVERAGE_DIR. # When this script is invoked by tools/test/collect_coverage.sh either of # these two coverage reports will be picked up by LcovMerger and their # content will be converted and/or merged with other reports to an lcov # format, generating the final code coverage report. case "$BAZEL_CC_COVERAGE_TOOL" in ("GCOV") gcov_coverage "$COVERAGE_DIR/_cc_coverage.gcov" ;; ("PROFDATA") llvm_coverage "$COVERAGE_DIR/_cc_coverage.dat" ;; (*) echo "Coverage tool $BAZEL_CC_COVERAGE_TOOL not supported" \ && exit 1 esac } main ================================================ FILE: bazel/coverage/fuzz_coverage_wrapper.sh ================================================ #!/bin/bash set -ex TEST_BINARY=$1 shift # Clear existing corpus if previous run wasn't in sandbox rm -rf fuzz_corpus mkdir -p fuzz_corpus/seed_corpus cp -r "$@" fuzz_corpus/seed_corpus # TODO(asraa): When fuzz targets are stable, remove error suppression and run coverage while fuzzing. LLVM_PROFILE_FILE='' ${TEST_BINARY} fuzz_corpus -seed="${FUZZ_CORPUS_SEED:-1}" -max_total_time="${FUZZ_CORPUS_TIME:-60}" -max_len=2048 -rss_limit_mb=8192 -timeout=30 || : # Passing files instead of a directory will run fuzzing as a regression test. # TODO(asraa): Remove manual `|| :`, but this shouldn't be necessary. 
_CORPUS="$(find fuzz_corpus -type f)" while read -r line; do CORPUS+=("$line"); done \ <<< "$_CORPUS" ${TEST_BINARY} "${CORPUS[@]}" -rss_limit_mb=8192 || : ================================================ FILE: bazel/crates.bzl ================================================ """ cargo-raze crate workspace functions DO NOT EDIT! Replaced on runs of cargo-raze """ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository") def _new_http_archive(name, **kwargs): if not native.existing_rule(name): http_archive(name = name, **kwargs) def _new_git_repository(name, **kwargs): if not native.existing_rule(name): new_git_repository(name = name, **kwargs) def raze_fetch_remote_crates(): _new_http_archive( name = "raze__ahash__0_3_8", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/ahash/ahash-0.3.8.crate", type = "tar.gz", strip_prefix = "ahash-0.3.8", build_file = Label("//bazel/external/cargo/remote:ahash-0.3.8.BUILD"), ) _new_http_archive( name = "raze__autocfg__1_0_0", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/autocfg/autocfg-1.0.0.crate", type = "tar.gz", strip_prefix = "autocfg-1.0.0", build_file = Label("//bazel/external/cargo/remote:autocfg-1.0.0.BUILD"), ) _new_http_archive( name = "raze__cfg_if__0_1_10", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/cfg-if/cfg-if-0.1.10.crate", type = "tar.gz", strip_prefix = "cfg-if-0.1.10", build_file = Label("//bazel/external/cargo/remote:cfg-if-0.1.10.BUILD"), ) _new_http_archive( name = "raze__hashbrown__0_7_2", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/hashbrown/hashbrown-0.7.2.crate", type = "tar.gz", strip_prefix = "hashbrown-0.7.2", build_file = Label("//bazel/external/cargo/remote:hashbrown-0.7.2.BUILD"), ) _new_http_archive( name = "raze__libc__0_2_74", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/libc/libc-0.2.74.crate", type = "tar.gz", strip_prefix = "libc-0.2.74", 
build_file = Label("//bazel/external/cargo/remote:libc-0.2.74.BUILD"), ) _new_http_archive( name = "raze__log__0_4_11", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/log/log-0.4.11.crate", type = "tar.gz", strip_prefix = "log-0.4.11", build_file = Label("//bazel/external/cargo/remote:log-0.4.11.BUILD"), ) _new_http_archive( name = "raze__memory_units__0_4_0", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/memory_units/memory_units-0.4.0.crate", type = "tar.gz", strip_prefix = "memory_units-0.4.0", build_file = Label("//bazel/external/cargo/remote:memory_units-0.4.0.BUILD"), ) _new_http_archive( name = "raze__proxy_wasm__0_1_2", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/proxy-wasm/proxy-wasm-0.1.2.crate", type = "tar.gz", strip_prefix = "proxy-wasm-0.1.2", build_file = Label("//bazel/external/cargo/remote:proxy-wasm-0.1.2.BUILD"), ) _new_http_archive( name = "raze__wee_alloc__0_4_5", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/wee_alloc/wee_alloc-0.4.5.crate", type = "tar.gz", strip_prefix = "wee_alloc-0.4.5", build_file = Label("//bazel/external/cargo/remote:wee_alloc-0.4.5.BUILD"), ) _new_http_archive( name = "raze__winapi__0_3_9", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi/winapi-0.3.9.crate", type = "tar.gz", strip_prefix = "winapi-0.3.9", build_file = Label("//bazel/external/cargo/remote:winapi-0.3.9.BUILD"), ) _new_http_archive( name = "raze__winapi_i686_pc_windows_gnu__0_4_0", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi-i686-pc-windows-gnu/winapi-i686-pc-windows-gnu-0.4.0.crate", type = "tar.gz", strip_prefix = "winapi-i686-pc-windows-gnu-0.4.0", build_file = Label("//bazel/external/cargo/remote:winapi-i686-pc-windows-gnu-0.4.0.BUILD"), ) _new_http_archive( name = "raze__winapi_x86_64_pc_windows_gnu__0_4_0", url = "https://crates-io.s3-us-west-1.amazonaws.com/crates/winapi-x86_64-pc-windows-gnu/winapi-x86_64-pc-windows-gnu-0.4.0.crate", type = "tar.gz", 
strip_prefix = "winapi-x86_64-pc-windows-gnu-0.4.0", build_file = Label("//bazel/external/cargo/remote:winapi-x86_64-pc-windows-gnu-0.4.0.BUILD"), ) ================================================ FILE: bazel/dependency_imports.bzl ================================================ load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies") load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") load("@envoy_build_tools//toolchains:rbe_toolchains_config.bzl", "rbe_toolchains_config") load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe_exec_properties_dict", "custom_exec_properties") load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies") load("@upb//bazel:repository_defs.bzl", upb_bazel_version_repository = "bazel_version_repository") load("@io_bazel_rules_rust//rust:repositories.bzl", "rust_repositories") load("@io_bazel_rules_rust//:workspace.bzl", "bazel_version") load("@config_validation_pip3//:requirements.bzl", config_validation_pip_install = "pip_install") load("@configs_pip3//:requirements.bzl", configs_pip_install = "pip_install") load("@headersplit_pip3//:requirements.bzl", headersplit_pip_install = "pip_install") load("@kafka_pip3//:requirements.bzl", kafka_pip_install = "pip_install") load("@protodoc_pip3//:requirements.bzl", protodoc_pip_install = "pip_install") load("@thrift_pip3//:requirements.bzl", thrift_pip_install = "pip_install") load("@rules_antlr//antlr:deps.bzl", "antlr_dependencies") # go version for rules_go GO_VERSION = "1.14.7" def envoy_dependency_imports(go_version = GO_VERSION): rules_foreign_cc_dependencies() go_rules_dependencies() go_register_toolchains(go_version) rbe_toolchains_config() gazelle_dependencies() apple_rules_dependencies() rust_repositories() bazel_version(name = "bazel_version") upb_bazel_version_repository(name = 
"upb_bazel_version") antlr_dependencies(472) custom_exec_properties( name = "envoy_large_machine_exec_property", constants = { "LARGE_MACHINE": create_rbe_exec_properties_dict(labels = dict(size = "large")), }, ) go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable", importpath = "google.golang.org/grpc", sum = "h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=", version = "v1.29.1", ) go_repository( name = "org_golang_x_net", importpath = "golang.org/x/net", sum = "h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=", version = "v0.0.0-20190813141303-74dc4d7220e7", ) go_repository( name = "org_golang_x_text", importpath = "golang.org/x/text", sum = "h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=", version = "v0.3.0", ) config_validation_pip_install() configs_pip_install() headersplit_pip_install() kafka_pip_install() protodoc_pip_install() thrift_pip_install() ================================================ FILE: bazel/dev_binding.bzl ================================================ def _default_envoy_dev_impl(ctxt): if "LLVM_CONFIG" in ctxt.os.environ: ctxt.file("WORKSPACE", "") ctxt.file("BUILD.bazel", "") ctxt.symlink(ctxt.path(ctxt.attr.envoy_root).dirname.get_child("tools").get_child("clang_tools"), "clang_tools") _default_envoy_dev = repository_rule( implementation = _default_envoy_dev_impl, attrs = { "envoy_root": attr.label(default = "@envoy//:BUILD"), }, ) def _clang_tools_impl(ctxt): if "LLVM_CONFIG" in ctxt.os.environ: llvm_config_path = ctxt.os.environ["LLVM_CONFIG"] exec_result = ctxt.execute([llvm_config_path, "--includedir"]) if exec_result.return_code != 0: fail(llvm_config_path + " --includedir returned %d" % exec_result.return_code) clang_tools_include_path = exec_result.stdout.rstrip() exec_result = ctxt.execute([llvm_config_path, "--libdir"]) if exec_result.return_code != 0: fail(llvm_config_path + " --libdir returned %d" % exec_result.return_code) clang_tools_lib_path = exec_result.stdout.rstrip() for include_dir in 
["clang", "clang-c", "llvm", "llvm-c"]: ctxt.symlink(clang_tools_include_path + "/" + include_dir, include_dir) ctxt.symlink(clang_tools_lib_path, "lib") ctxt.symlink(Label("@envoy_dev//clang_tools/support:BUILD.prebuilt"), "BUILD") _clang_tools = repository_rule( implementation = _clang_tools_impl, environ = ["LLVM_CONFIG"], ) def envoy_dev_binding(): # Treat the Envoy developer tools that require llvm as an external repo, this avoids # breaking bazel build //... when llvm is not installed. if "envoy_dev" not in native.existing_rules().keys(): _default_envoy_dev(name = "envoy_dev") _clang_tools(name = "clang_tools") ================================================ FILE: bazel/envoy_binary.bzl ================================================ load("@rules_cc//cc:defs.bzl", "cc_binary") # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. # Envoy binary targets load( ":envoy_internal.bzl", "envoy_copts", "envoy_external_dep_path", "envoy_stdlib_deps", "tcmalloc_external_dep", ) # Envoy C++ binary targets should be specified with this function. def envoy_cc_binary( name, srcs = [], data = [], testonly = 0, visibility = None, external_deps = [], repository = "", stamped = False, deps = [], linkopts = [], tags = []): if not linkopts: linkopts = _envoy_linkopts() if stamped: linkopts = linkopts + _envoy_stamped_linkopts() deps = deps + _envoy_stamped_deps() deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + envoy_stdlib_deps() cc_binary( name = name, srcs = srcs, data = data, copts = envoy_copts(repository), linkopts = linkopts, testonly = testonly, linkstatic = 1, visibility = visibility, malloc = tcmalloc_external_dep(repository), stamp = 1, deps = deps, tags = tags, ) # Select the given values if exporting is enabled in the current build. def _envoy_select_exported_symbols(xs): return select({ "@envoy//bazel:enable_exported_symbols": xs, "//conditions:default": [], }) # Compute the final linkopts based on various options. 
def _envoy_linkopts(): return select({ # The macOS system library transitively links common libraries (e.g., pthread). "@envoy//bazel:apple": [ # See note here: https://luajit.org/install.html "-pagezero_size 10000", "-image_base 100000000", ], "@envoy//bazel:windows_x86_64": [ "-DEFAULTLIB:advapi32.lib", "-DEFAULTLIB:ws2_32.lib", "-DEFAULTLIB:iphlpapi.lib", "-WX", ], "//conditions:default": [ "-pthread", "-lrt", "-ldl", "-Wl,-z,relro,-z,now", "-Wl,--hash-style=gnu", ], }) + select({ "@envoy//bazel:boringssl_fips": [], "@envoy//bazel:windows_x86_64": [], "//conditions:default": ["-pie"], }) + _envoy_select_exported_symbols(["-Wl,-E"]) def _envoy_stamped_deps(): return select({ "@envoy//bazel:windows_x86_64": [], "@envoy//bazel:apple": [ "@envoy//bazel:raw_build_id.ldscript", ], "//conditions:default": [ "@envoy//bazel:gnu_build_id.ldscript", ], }) def _envoy_stamped_linkopts(): return select({ # Coverage builds in CI are failing to link when setting a build ID. # # /usr/bin/ld.gold: internal error in write_build_id, at ../../gold/layout.cc:5419 "@envoy//bazel:coverage_build": [], "@envoy//bazel:windows_x86_64": [], # macOS doesn't have an official equivalent to the `.note.gnu.build-id` # ELF section, so just stuff the raw ID into a new text section. "@envoy//bazel:apple": [ "-sectcreate __TEXT __build_id", "$(location @envoy//bazel:raw_build_id.ldscript)", ], # Note: assumes GNU GCC (or compatible) handling of `--build-id` flag. "//conditions:default": [ "-Wl,@$(location @envoy//bazel:gnu_build_id.ldscript)", ], }) ================================================ FILE: bazel/envoy_build_system.bzl ================================================ # The main Envoy bazel file. Load this file for all Envoy-specific build macros # and rules that you'd like to use in your BUILD files. 
load("@rules_foreign_cc//tools/build_defs:cmake.bzl", "cmake_external") load(":envoy_binary.bzl", _envoy_cc_binary = "envoy_cc_binary") load(":envoy_internal.bzl", "envoy_external_dep_path") load( ":envoy_library.bzl", _envoy_basic_cc_library = "envoy_basic_cc_library", _envoy_cc_extension = "envoy_cc_extension", _envoy_cc_library = "envoy_cc_library", _envoy_cc_posix_library = "envoy_cc_posix_library", _envoy_cc_win32_library = "envoy_cc_win32_library", _envoy_include_prefix = "envoy_include_prefix", _envoy_proto_library = "envoy_proto_library", ) load( ":envoy_select.bzl", _envoy_select_boringssl = "envoy_select_boringssl", _envoy_select_google_grpc = "envoy_select_google_grpc", _envoy_select_hot_restart = "envoy_select_hot_restart", _envoy_select_new_codecs_in_integration_tests = "envoy_select_new_codecs_in_integration_tests", _envoy_select_wasm = "envoy_select_wasm", _envoy_select_wasm_all_v8_wavm_none = "envoy_select_wasm_all_v8_wavm_none", _envoy_select_wasm_v8 = "envoy_select_wasm_v8", _envoy_select_wasm_wavm = "envoy_select_wasm_wavm", ) load( ":envoy_test.bzl", _envoy_benchmark_test = "envoy_benchmark_test", _envoy_cc_benchmark_binary = "envoy_cc_benchmark_binary", _envoy_cc_fuzz_test = "envoy_cc_fuzz_test", _envoy_cc_mock = "envoy_cc_mock", _envoy_cc_test = "envoy_cc_test", _envoy_cc_test_binary = "envoy_cc_test_binary", _envoy_cc_test_library = "envoy_cc_test_library", _envoy_py_test_binary = "envoy_py_test_binary", _envoy_sh_test = "envoy_sh_test", ) load( "@envoy_build_config//:extensions_build_config.bzl", "EXTENSION_PACKAGE_VISIBILITY", ) def envoy_package(): native.package(default_visibility = ["//visibility:public"]) def envoy_extension_package(): native.package(default_visibility = EXTENSION_PACKAGE_VISIBILITY) # A genrule variant that can output a directory. This is useful when doing things like # generating a fuzz corpus mechanically. 
# Rule implementation for envoy_directory_genrule: runs a shell command whose
# outputs all land under a single declared tree artifact. The command sees the
# output directory as $GENRULE_OUTPUT_DIR.
def _envoy_directory_genrule_impl(ctx):
    tree = ctx.actions.declare_directory(ctx.attr.name + ".outputs")
    ctx.actions.run_shell(
        inputs = ctx.files.srcs,
        tools = ctx.files.tools,
        outputs = [tree],
        command = "mkdir -p " + tree.path + " && " + ctx.expand_location(ctx.attr.cmd),
        env = {"GENRULE_OUTPUT_DIR": tree.path},
    )
    return [DefaultInfo(files = depset([tree]))]

envoy_directory_genrule = rule(
    implementation = _envoy_directory_genrule_impl,
    attrs = {
        "srcs": attr.label_list(),
        "cmd": attr.string(),
        "tools": attr.label_list(),
    },
)

# External CMake C++ library targets should be specified with this function. This defaults
# to building the dependencies with ninja
def envoy_cmake_external(
        name,
        cache_entries = {},
        debug_cache_entries = {},
        cmake_options = ["-GNinja"],
        make_commands = ["ninja -v", "ninja -v install"],
        lib_source = "",
        postfix_script = "",
        static_libraries = [],
        copy_pdb = False,
        pdb_name = "",
        cmake_files_dir = "$BUILD_TMPDIR/CMakeFiles",
        generate_crosstool_file = False,
        **kwargs):
    # Work on a copy: calling .update() on the parameter directly would fail on
    # the frozen default dict and would clobber the caller's dict.
    cache_entries = dict(cache_entries)
    cache_entries.update({"CMAKE_BUILD_TYPE": "Bazel"})
    cache_entries_debug = dict(cache_entries)
    cache_entries_debug.update(debug_cache_entries)

    pf = ""
    if copy_pdb:
        # On Windows debug builds, copy the .pdb next to the installed library.
        # TODO: Add iterator of the first list presented of these options;
        # static_libraries[.pdb], pdb_names, name[.pdb] files
        if pdb_name == "":
            pdb_name = name

        copy_command = "cp {cmake_files_dir}/{pdb_name}.dir/{pdb_name}.pdb $INSTALLDIR/lib/{pdb_name}.pdb".format(cmake_files_dir = cmake_files_dir, pdb_name = pdb_name)
        if postfix_script != "":
            copy_command = copy_command + " && " + postfix_script

        pf = select({
            "@envoy//bazel:windows_dbg_build": copy_command,
            "//conditions:default": postfix_script,
        })
    else:
        pf = postfix_script

    cmake_external(
        name = name,
        cache_entries = select({
            "@envoy//bazel:dbg_build": cache_entries_debug,
            "//conditions:default": cache_entries,
        }),
        cmake_options = cmake_options,
        # TODO(lizan): Make this always true
        generate_crosstool_file = select({
            "@envoy//bazel:windows_x86_64": True,
            "//conditions:default": generate_crosstool_file,
        }),
        lib_source = lib_source,
        make_commands = make_commands,
        postfix_script = pf,
        static_libraries = static_libraries,
        **kwargs
    )

# Used to select a dependency that has different implementations on POSIX vs Windows.
# The platform-specific implementations should be specified with envoy_cc_posix_library
# and envoy_cc_win32_library respectively
def envoy_cc_platform_dep(name):
    return select({
        "@envoy//bazel:windows_x86_64": [name + "_win32"],
        "//conditions:default": [name + "_posix"],
    })

# Envoy proto descriptor targets should be specified with this function.
# This is used for testing only.
def envoy_proto_descriptor(name, out, srcs = [], external_deps = []):
    # Note: the protoc command line only references the caller-supplied srcs;
    # the well-known/googleapis protos appended below are inputs only.
    input_files = ["$(location " + src + ")" for src in srcs]
    include_paths = [".", native.package_name()]

    # Copy before appending: the default `srcs` value is frozen and callers'
    # lists must not be mutated.
    srcs = list(srcs)

    if "api_httpbody_protos" in external_deps:
        srcs.append("@com_google_googleapis//google/api:httpbody.proto")
        include_paths.append("external/com_google_googleapis")

    if "http_api_protos" in external_deps:
        srcs.append("@com_google_googleapis//google/api:annotations.proto")
        srcs.append("@com_google_googleapis//google/api:http.proto")
        include_paths.append("external/com_google_googleapis")

    if "well_known_protos" in external_deps:
        srcs.append("@com_google_protobuf//:well_known_protos")
        include_paths.append("external/com_google_protobuf/src")

    options = ["--include_imports"]
    options.extend(["-I" + include_path for include_path in include_paths])
    options.append("--descriptor_set_out=$@")

    cmd = "$(location //external:protoc) " + " ".join(options + input_files)
    native.genrule(
        name = name,
        srcs = srcs,
        outs = [out],
        cmd = cmd,
        tools = ["//external:protoc"],
    )

# Dependencies on Google grpc should be wrapped with this function.
def envoy_google_grpc_external_deps():
    return envoy_select_google_grpc([envoy_external_dep_path("grpc")])

# Here we create wrappers for each of the public targets within the separate bazel
This maintains envoy_build_system.bzl as the preferred import # for BUILD files that need these build macros. Do not use the imports directly # from the other bzl files (e.g. envoy_select.bzl, envoy_binary.bzl, etc.) # Select wrappers (from envoy_select.bzl) envoy_select_boringssl = _envoy_select_boringssl envoy_select_google_grpc = _envoy_select_google_grpc envoy_select_hot_restart = _envoy_select_hot_restart envoy_select_wasm = _envoy_select_wasm envoy_select_wasm_all_v8_wavm_none = _envoy_select_wasm_all_v8_wavm_none envoy_select_wasm_wavm = _envoy_select_wasm_wavm envoy_select_wasm_v8 = _envoy_select_wasm_v8 envoy_select_new_codecs_in_integration_tests = _envoy_select_new_codecs_in_integration_tests # Binary wrappers (from envoy_binary.bzl) envoy_cc_binary = _envoy_cc_binary # Library wrappers (from envoy_library.bzl) envoy_basic_cc_library = _envoy_basic_cc_library envoy_cc_extension = _envoy_cc_extension envoy_cc_library = _envoy_cc_library envoy_cc_posix_library = _envoy_cc_posix_library envoy_cc_win32_library = _envoy_cc_win32_library envoy_include_prefix = _envoy_include_prefix envoy_proto_library = _envoy_proto_library # Test wrappers (from envoy_test.bzl) envoy_cc_fuzz_test = _envoy_cc_fuzz_test envoy_cc_mock = _envoy_cc_mock envoy_cc_test = _envoy_cc_test envoy_cc_test_binary = _envoy_cc_test_binary envoy_cc_test_library = _envoy_cc_test_library envoy_cc_benchmark_binary = _envoy_cc_benchmark_binary envoy_benchmark_test = _envoy_benchmark_test envoy_py_test_binary = _envoy_py_test_binary envoy_sh_test = _envoy_sh_test ================================================ FILE: bazel/envoy_internal.bzl ================================================ # DO NOT LOAD THIS FILE. Targets from this file should be considered private # and not used outside of the @envoy//bazel package. load(":envoy_select.bzl", "envoy_select_google_grpc", "envoy_select_hot_restart") # Compute the final copts based on various options. 
def envoy_copts(repository, test = False):
    posix_options = [
        "-Wall",
        "-Wextra",
        "-Werror",
        "-Wnon-virtual-dtor",
        "-Woverloaded-virtual",
        "-Wold-style-cast",
        "-Wformat",
        "-Wformat-security",
        "-Wvla",
    ]

    # Windows options for cleanest service compilation;
    # General MSVC C++ options for Envoy current expectations.
    # Target windows.h for all Windows 10 (0x0A) API prototypes (ntohll etc)
    # (See https://msdn.microsoft.com/en-us/library/windows/desktop/aa383745(v=vs.85).aspx )
    # Optimize Windows headers by dropping GUI-oriented features from compilation
    msvc_options = [
        "-WX",
        "-Zc:__cplusplus",
        "-DWIN32",
        "-D_WIN32_WINNT=0x0A00",  # _WIN32_WINNT_WIN10
        "-DNTDDI_VERSION=0x0A000000",  # NTDDI_WIN10
        "-DWIN32_LEAN_AND_MEAN",
        "-DNOUSER",
        "-DNOMCX",
        "-DNOIME",
        "-DNOCRYPT",
        # Ignore unguarded gcc pragmas in quiche (unrecognized by MSVC)
        # TODO(wrowe): Drop this change when fixed in bazel/external/quiche.genrule_cmd
        "-wd4068",
        # this is to silence the incorrect MSVC compiler warning when trying to convert between
        # std::optional data types while conversions between primitive types are producing no error
        "-wd4244",
    ]

    return select({
        repository + "//bazel:windows_x86_64": msvc_options,
        "//conditions:default": posix_options,
    }) + select({
        # Bazel adds an implicit -DNDEBUG for opt.
        repository + "//bazel:opt_build": [] if test else ["-ggdb3", "-gsplit-dwarf"],
        repository + "//bazel:fastbuild_build": [],
        repository + "//bazel:dbg_build": ["-ggdb3", "-gsplit-dwarf"],
        repository + "//bazel:windows_opt_build": [],
        repository + "//bazel:windows_fastbuild_build": [],
        repository + "//bazel:windows_dbg_build": [],
    }) + select({
        # Compiler-specific warning sets.
        repository + "//bazel:clang_build": ["-fno-limit-debug-info", "-Wgnu-conditional-omitted-operand", "-Wc++2a-extensions", "-Wrange-loop-analysis"],
        repository + "//bazel:gcc_build": ["-Wno-maybe-uninitialized"],
        "//conditions:default": [],
    }) + select({
        repository + "//bazel:no_debug_info": ["-g0"],
        "//conditions:default": [],
    }) + select({
        # Malloc implementation defines; mirrors tcmalloc_external_dep below.
        repository + "//bazel:disable_tcmalloc": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"],
        repository + "//bazel:disable_tcmalloc_on_linux_x86_64": ["-DABSL_MALLOC_HOOK_MMAP_DISABLE"],
        repository + "//bazel:gperftools_tcmalloc": ["-DGPERFTOOLS_TCMALLOC"],
        repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": ["-DGPERFTOOLS_TCMALLOC"],
        repository + "//bazel:debug_tcmalloc": ["-DENVOY_MEMORY_DEBUG_ENABLED=1", "-DGPERFTOOLS_TCMALLOC"],
        repository + "//bazel:debug_tcmalloc_on_linux_x86_64": ["-DENVOY_MEMORY_DEBUG_ENABLED=1", "-DGPERFTOOLS_TCMALLOC"],
        repository + "//bazel:linux_x86_64": ["-DTCMALLOC"],
        "//conditions:default": ["-DGPERFTOOLS_TCMALLOC"],
    }) + select({
        repository + "//bazel:disable_signal_trace": [],
        "//conditions:default": ["-DENVOY_HANDLE_SIGNALS"],
    }) + select({
        repository + "//bazel:disable_object_dump_on_signal_trace": [],
        "//conditions:default": ["-DENVOY_OBJECT_TRACE_ON_DUMP"],
    }) + select({
        repository + "//bazel:disable_deprecated_features": ["-DENVOY_DISABLE_DEPRECATED_FEATURES"],
        "//conditions:default": [],
    }) + select({
        repository + "//bazel:enable_log_debug_assert_in_release": ["-DENVOY_LOG_DEBUG_ASSERT_IN_RELEASE"],
        "//conditions:default": [],
    }) + select({
        repository + "//bazel:disable_known_issue_asserts": ["-DENVOY_DISABLE_KNOWN_ISSUE_ASSERTS"],
        "//conditions:default": [],
    }) + select({
        # APPLE_USE_RFC_3542 is needed to support IPV6_PKTINFO in MAC OS.
        repository + "//bazel:apple": ["-D__APPLE_USE_RFC_3542"],
        "//conditions:default": [],
    }) + envoy_select_hot_restart(["-DENVOY_HOT_RESTART"], repository) + \
        _envoy_select_perf_annotation(["-DENVOY_PERF_ANNOTATION"]) + \
        envoy_select_google_grpc(["-DENVOY_GOOGLE_GRPC"], repository) + \
        _envoy_select_path_normalization_by_default(["-DENVOY_NORMALIZE_PATH_BY_DEFAULT"], repository)

# References to Envoy external dependencies should be wrapped with this function.
def envoy_external_dep_path(dep):
    return "//external:%s" % dep

# 1 (static) unless dynamic-link tests are requested.
def envoy_linkstatic():
    return select({
        "@envoy//bazel:dynamic_link_tests": 0,
        "//conditions:default": 1,
    })

# Select `if_libcpp` when forcing libc++, otherwise `default` (POSIX only).
def envoy_select_force_libcpp(if_libcpp, default = None):
    return select({
        "@envoy//bazel:force_libcpp": if_libcpp,
        "@envoy//bazel:apple": [],
        "@envoy//bazel:windows_x86_64": [],
        "//conditions:default": default or [],
    })

# Sanitizer builds link the C++ standard library dynamically; otherwise static.
def envoy_stdlib_deps():
    return select({
        "@envoy//bazel:asan_build": ["@envoy//bazel:dynamic_stdlib"],
        "@envoy//bazel:msan_build": ["@envoy//bazel:dynamic_stdlib"],
        "@envoy//bazel:tsan_build": ["@envoy//bazel:dynamic_stdlib"],
        "//conditions:default": ["@envoy//bazel:static_stdlib"],
    })

# Dependencies on tcmalloc_and_profiler should be wrapped with this function.
def tcmalloc_external_dep(repository): return select({ repository + "//bazel:disable_tcmalloc": None, repository + "//bazel:disable_tcmalloc_on_linux_x86_64": None, repository + "//bazel:debug_tcmalloc": envoy_external_dep_path("gperftools"), repository + "//bazel:debug_tcmalloc_on_linux_x86_64": envoy_external_dep_path("gperftools"), repository + "//bazel:gperftools_tcmalloc": envoy_external_dep_path("gperftools"), repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": envoy_external_dep_path("gperftools"), repository + "//bazel:linux_x86_64": envoy_external_dep_path("tcmalloc"), "//conditions:default": envoy_external_dep_path("gperftools"), }) # Select the given values if default path normalization is on in the current build. def _envoy_select_path_normalization_by_default(xs, repository = ""): return select({ repository + "//bazel:enable_path_normalization_by_default": xs, "//conditions:default": [], }) def _envoy_select_perf_annotation(xs): return select({ "@envoy//bazel:enable_perf_annotation": xs, "//conditions:default": [], }) ================================================ FILE: bazel/envoy_library.bzl ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. # Envoy library targets load( ":envoy_internal.bzl", "envoy_copts", "envoy_external_dep_path", "envoy_linkstatic", ) load("@envoy_api//bazel:api_build_system.bzl", "api_cc_py_proto_library") load( "@envoy_build_config//:extensions_build_config.bzl", "EXTENSION_CONFIG_VISIBILITY", ) # As above, but wrapped in list form for adding to dep lists. This smell seems needed as # SelectorValue values have to match the attribute type. See # https://github.com/bazelbuild/bazel/issues/2273. 
def tcmalloc_external_deps(repository):
    return select({
        repository + "//bazel:disable_tcmalloc": [],
        repository + "//bazel:disable_tcmalloc_on_linux_x86_64": [],
        repository + "//bazel:debug_tcmalloc": [envoy_external_dep_path("gperftools")],
        repository + "//bazel:debug_tcmalloc_on_linux_x86_64": [envoy_external_dep_path("gperftools")],
        repository + "//bazel:gperftools_tcmalloc": [envoy_external_dep_path("gperftools")],
        repository + "//bazel:gperftools_tcmalloc_on_linux_x86_64": [envoy_external_dep_path("gperftools")],
        repository + "//bazel:linux_x86_64": [envoy_external_dep_path("tcmalloc")],
        "//conditions:default": [envoy_external_dep_path("gperftools")],
    })

# Envoy C++ library targets that need no transformations or additional dependencies before being
# passed to cc_library should be specified with this function. Note: this exists to ensure that
# all envoy targets pass through an envoy-declared Starlark function where they can be modified
# before being passed to a native bazel function.
def envoy_basic_cc_library(name, deps = [], external_deps = [], **kargs):
    cc_library(
        name = name,
        deps = deps + [envoy_external_dep_path(dep) for dep in external_deps],
        **kargs
    )

# All Envoy extensions must be tagged with their security hardening stance with
# respect to downstream and upstream data plane threats. These are verbose
# labels intended to make clear the trust that operators may place in
# extensions.
EXTENSION_SECURITY_POSTURES = [
    # This extension is hardened against untrusted downstream traffic. It
    # assumes that the upstream is trusted.
    "robust_to_untrusted_downstream",
    # This extension is hardened against both untrusted downstream and upstream
    # traffic.
    "robust_to_untrusted_downstream_and_upstream",
    # This extension is not hardened and should only be used in deployments
    # where both the downstream and upstream are trusted.
    "requires_trusted_downstream_and_upstream",
    # This is functionally equivalent to
    # requires_trusted_downstream_and_upstream, but acts as a placeholder to
    # allow us to identify extensions that need classifying.
    "unknown",
    # Not relevant to data plane threats, e.g. stats sinks.
    "data_plane_agnostic",
]

EXTENSION_STATUS_VALUES = [
    # This extension is stable and is expected to be production usable.
    "stable",
    # This extension is functional but has not had substantial production burn
    # time, use only with this caveat.
    "alpha",
    # This extension is work-in-progress. Functionality is incomplete and it is
    # not intended for production use.
    "wip",
]

# Declares an Envoy extension library. Validates the declared security posture
# and status against the allowlists above, then delegates to envoy_cc_library.
def envoy_cc_extension(
        name,
        security_posture,
        # Only set this for internal, undocumented extensions.
        undocumented = False,
        status = "stable",
        tags = [],
        extra_visibility = [],
        visibility = EXTENSION_CONFIG_VISIBILITY,
        **kwargs):
    if security_posture not in EXTENSION_SECURITY_POSTURES:
        fail("Unknown extension security posture: " + security_posture)
    if status not in EXTENSION_STATUS_VALUES:
        fail("Unknown extension status: " + status)

    # Already-public targets cannot be widened further.
    if "//visibility:public" not in visibility:
        visibility = visibility + extra_visibility
    envoy_cc_library(name, tags = tags, visibility = visibility, **kwargs)

# Envoy C++ library targets should be specified with this function.
def envoy_cc_library(
        name,
        srcs = [],
        hdrs = [],
        copts = [],
        visibility = None,
        external_deps = [],
        tcmalloc_dep = None,
        repository = "",
        tags = [],
        deps = [],
        strip_include_prefix = None,
        textual_hdrs = None,
        defines = []):
    if tcmalloc_dep:
        # Rebind rather than extend in place (`deps += ...`): in-place mutation
        # would clobber the caller's list and fails on the frozen [] default.
        deps = deps + tcmalloc_external_deps(repository)

    cc_library(
        name = name,
        srcs = srcs,
        hdrs = hdrs,
        copts = envoy_copts(repository) + copts,
        visibility = visibility,
        tags = tags,
        textual_hdrs = textual_hdrs,
        deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + [
            repository + "//include/envoy/common:base_includes",
            repository + "//source/common/common:fmt_lib",
            envoy_external_dep_path("abseil_flat_hash_map"),
            envoy_external_dep_path("abseil_flat_hash_set"),
            envoy_external_dep_path("abseil_strings"),
            envoy_external_dep_path("spdlog"),
            envoy_external_dep_path("fmtlib"),
        ],
        include_prefix = envoy_include_prefix(native.package_name()),
        alwayslink = 1,
        linkstatic = envoy_linkstatic(),
        strip_include_prefix = strip_include_prefix,
        defines = defines,
    )

    # Intended for usage by external consumers. This allows them to disambiguate
    # include paths via `external/envoy...`
    cc_library(
        name = name + "_with_external_headers",
        hdrs = hdrs,
        copts = envoy_copts(repository) + copts,
        visibility = visibility,
        tags = ["nocompdb"],
        deps = [":" + name],
        strip_include_prefix = strip_include_prefix,
    )

# Used to specify a library that only builds on POSIX
def envoy_cc_posix_library(name, srcs = [], hdrs = [], **kargs):
    envoy_cc_library(
        name = name + "_posix",
        srcs = select({
            "@envoy//bazel:windows_x86_64": [],
            "//conditions:default": srcs,
        }),
        hdrs = select({
            "@envoy//bazel:windows_x86_64": [],
            "//conditions:default": hdrs,
        }),
        **kargs
    )

# Used to specify a library that only builds on Windows
def envoy_cc_win32_library(name, srcs = [], hdrs = [], **kargs):
    envoy_cc_library(
        name = name + "_win32",
        srcs = select({
            "@envoy//bazel:windows_x86_64": srcs,
            "//conditions:default": [],
        }),
        hdrs = select({
            "@envoy//bazel:windows_x86_64": hdrs,
            "//conditions:default": [],
        }),
        **kargs
    )

# Transform the package path (e.g. include/envoy/common) into a path for
# exporting the package headers at (e.g. envoy/common). Source files can then
# include using this path scheme (e.g. #include "envoy/common/time.h").
def envoy_include_prefix(path):
    if path.startswith("source/") or path.startswith("include/"):
        return "/".join(path.split("/")[1:])
    return None

# Envoy proto targets should be specified with this function.
def envoy_proto_library(name, external_deps = [], **kwargs):
    api_cc_py_proto_library(
        name,
        # Avoid generating .so, we don't need it, can interfere with builds
        # such as OSS-Fuzz.
        linkstatic = 1,
        visibility = ["//visibility:public"],
        **kwargs
    )

================================================ FILE: bazel/envoy_select.bzl ================================================

# DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead.

# Envoy select targets. This is in a separate file to avoid a circular
# dependency with envoy_build_system.bzl.
# Used to select a dependency that has different implementations on POSIX vs Windows. # The platform-specific implementations should be specified with envoy_cc_posix_library # and envoy_cc_win32_library respectively def envoy_cc_platform_dep(name): return select({ "@envoy//bazel:windows_x86_64": [name + "_win32"], "//conditions:default": [name + "_posix"], }) def envoy_select_boringssl(if_fips, default = None, if_disabled = None): return select({ "@envoy//bazel:boringssl_fips": if_fips, "@envoy//bazel:boringssl_disabled": if_disabled or [], "//conditions:default": default or [], }) # Selects the given values if Google gRPC is enabled in the current build. def envoy_select_google_grpc(xs, repository = ""): return select({ repository + "//bazel:disable_google_grpc": [], "//conditions:default": xs, }) # Selects the given values if hot restart is enabled in the current build. def envoy_select_hot_restart(xs, repository = ""): return select({ repository + "//bazel:disable_hot_restart_or_apple": [], "//conditions:default": xs, }) # Selects the given values depending on the WASM runtimes enabled in the current build. def envoy_select_wasm(xs): return select({ "@envoy//bazel:wasm_none": [], "//conditions:default": xs, }) def envoy_select_wasm_v8(xs): return select({ "@envoy//bazel:wasm_wavm": [], "@envoy//bazel:wasm_none": [], "//conditions:default": xs, }) def envoy_select_wasm_wavm(xs): return select({ "@envoy//bazel:wasm_all": xs, "@envoy//bazel:wasm_wavm": xs, "//conditions:default": [], }) def envoy_select_wasm_all_v8_wavm_none(xs1, xs2, xs3, xs4): return select({ "@envoy//bazel:wasm_all": xs1, "@envoy//bazel:wasm_v8": xs2, "@envoy//bazel:wasm_wavm": xs3, "@envoy//bazel:wasm_none": xs4, "//conditions:default": xs2, }) # Select the given values if use legacy codecs in test is on in the current build. 
def envoy_select_new_codecs_in_integration_tests(xs, repository = ""): return select({ repository + "//bazel:enable_new_codecs_in_integration_tests": xs, "//conditions:default": [], }) ================================================ FILE: bazel/envoy_test.bzl ================================================ load("@rules_python//python:defs.bzl", "py_binary") load("@rules_cc//cc:defs.bzl", "cc_binary", "cc_library", "cc_test") # DO NOT LOAD THIS FILE. Load envoy_build_system.bzl instead. # Envoy test targets. This includes both test library and test binary targets. load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar") load(":envoy_binary.bzl", "envoy_cc_binary") load(":envoy_library.bzl", "tcmalloc_external_deps") load( ":envoy_internal.bzl", "envoy_copts", "envoy_external_dep_path", "envoy_linkstatic", "envoy_select_force_libcpp", "envoy_stdlib_deps", "tcmalloc_external_dep", ) # Envoy C++ related test infrastructure (that want gtest, gmock, but may be # relied on by envoy_cc_test_library) should use this function. def _envoy_cc_test_infrastructure_library( name, srcs = [], hdrs = [], data = [], external_deps = [], deps = [], repository = "", tags = [], include_prefix = None, copts = [], **kargs): # Add implicit tcmalloc external dependency(if available) in order to enable CPU and heap profiling in tests. deps += tcmalloc_external_deps(repository) cc_library( name = name, srcs = srcs, hdrs = hdrs, data = data, copts = envoy_copts(repository, test = True) + copts, testonly = 1, deps = deps + [envoy_external_dep_path(dep) for dep in external_deps] + [ envoy_external_dep_path("googletest"), ], tags = tags, include_prefix = include_prefix, alwayslink = 1, linkstatic = envoy_linkstatic(), **kargs ) # Compute the test linkopts based on various options. 
def _envoy_test_linkopts():
    return select({
        "@envoy//bazel:apple": [
            # See note here: https://luajit.org/install.html
            "-pagezero_size 10000",
            "-image_base 100000000",
        ],
        "@envoy//bazel:windows_x86_64": [
            "-DEFAULTLIB:advapi32.lib",
            "-DEFAULTLIB:ws2_32.lib",
            "-DEFAULTLIB:iphlpapi.lib",
            "-WX",
        ],

        # TODO(mattklein123): It's not great that we universally link against the following libs.
        # In particular, -latomic and -lrt are not needed on all platforms. Make this more granular.
        "//conditions:default": ["-pthread", "-lrt", "-ldl"],
    }) + envoy_select_force_libcpp([], ["-lstdc++fs", "-latomic"])

# Envoy C++ fuzz test targets. These are not included in coverage runs.
def envoy_cc_fuzz_test(
        name,
        corpus,
        dictionaries = [],
        repository = "",
        size = "medium",
        deps = [],
        tags = [],
        **kwargs):
    # A bare path (no //, :, or @ prefix) is treated as a local glob pattern
    # and wrapped in a filegroup; otherwise it is assumed to be a label.
    if not (corpus.startswith("//") or corpus.startswith(":") or corpus.startswith("@")):
        corpus_name = name + "_corpus"
        corpus = native.glob([corpus + "/**"])
        native.filegroup(
            name = corpus_name,
            srcs = corpus,
        )
    else:
        corpus_name = corpus
    tar_src = [corpus_name]
    if dictionaries:
        tar_src += dictionaries
    pkg_tar(
        name = name + "_corpus_tar",
        srcs = tar_src,
        testonly = 1,
    )
    fuzz_copts = ["-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION"]
    test_lib_name = name + "_lib"
    envoy_cc_test_library(
        name = test_lib_name,
        deps = deps + envoy_stdlib_deps() + [
            repository + "//test/fuzz:fuzz_runner_lib",
            repository + "//test/test_common:test_version_linkstamp",
        ],
        repository = repository,
        tags = tags,
        **kwargs
    )
    cc_test(
        name = name,
        copts = fuzz_copts + envoy_copts("@envoy", test = True),
        linkopts = _envoy_test_linkopts() + select({
            "@envoy//bazel:libfuzzer": ["-fsanitize=fuzzer"],
            "//conditions:default": [],
        }),
        linkstatic = envoy_linkstatic(),
        args = select({
            "@envoy//bazel:libfuzzer_coverage": ["$(locations %s)" % corpus_name],
            "@envoy//bazel:libfuzzer": [],
            "//conditions:default": ["$(locations %s)" % corpus_name],
        }),
        data = [corpus_name],
        # No fuzzing on macOS or Windows
        deps = select({
            "@envoy//bazel:apple": [repository + "//test:dummy_main"],
            "@envoy//bazel:windows_x86_64": [repository + "//test:dummy_main"],
            "@envoy//bazel:libfuzzer": [
                ":" + test_lib_name,
            ],
            "//conditions:default": [
                ":" + test_lib_name,
                repository + "//test/fuzz:main",
            ],
        }),
        size = size,
        tags = ["fuzz_target"] + tags,
    )

    # This target exists only for
    # https://github.com/google/oss-fuzz/blob/master/projects/envoy/build.sh. It won't yield
    # anything useful on its own, as it expects to be run in an environment where the linker options
    # provide a path to FuzzingEngine.
    cc_binary(
        name = name + "_driverless",
        copts = fuzz_copts + envoy_copts("@envoy", test = True),
        linkopts = ["-lFuzzingEngine"] + _envoy_test_linkopts(),
        linkstatic = 1,
        testonly = 1,
        deps = [":" + test_lib_name],
        tags = ["manual"] + tags,
    )

# Envoy C++ test targets should be specified with this function.
def envoy_cc_test(
        name,
        srcs = [],
        data = [],
        # List of pairs (Bazel shell script target, shell script args)
        repository = "",
        external_deps = [],
        deps = [],
        tags = [],
        args = [],
        copts = [],
        shard_count = None,
        coverage = True,
        local = False,
        size = "medium",
        flaky = False):
    coverage_tags = tags + ([] if coverage else ["nocoverage"])
    cc_test(
        name = name,
        srcs = srcs,
        data = data,
        copts = envoy_copts(repository, test = True) + copts,
        linkopts = _envoy_test_linkopts(),
        linkstatic = envoy_linkstatic(),
        malloc = tcmalloc_external_dep(repository),
        deps = envoy_stdlib_deps() + deps + [envoy_external_dep_path(dep) for dep in external_deps + ["googletest"]] + [
            repository + "//test:main",
            repository + "//test/test_common:test_version_linkstamp",
        ],
        # from https://github.com/google/googletest/blob/6e1970e2376c14bf658eb88f655a054030353f9f/googlemock/src/gmock.cc#L51
        # 2 - by default, mocks act as StrictMocks.
        args = args + ["--gmock_default_mock_behavior=2"],
        tags = coverage_tags,
        local = local,
        shard_count = shard_count,
        size = size,
        flaky = flaky,
    )

# Envoy C++ test related libraries (that want gtest, gmock) should be specified
# with this function.
def envoy_cc_test_library(
        name,
        srcs = [],
        hdrs = [],
        data = [],
        external_deps = [],
        deps = [],
        repository = "",
        tags = [],
        include_prefix = None,
        copts = [],
        **kargs):
    deps = deps + [
        repository + "//test/test_common:printers_includes",
    ]
    _envoy_cc_test_infrastructure_library(
        name,
        srcs,
        hdrs,
        data,
        external_deps,
        deps,
        repository,
        tags,
        include_prefix,
        copts,
        visibility = ["//visibility:public"],
        **kargs
    )

# Envoy test binaries should be specified with this function.
def envoy_cc_test_binary(
        name,
        tags = [],
        deps = [],
        **kargs):
    envoy_cc_binary(
        name,
        testonly = 1,
        linkopts = _envoy_test_linkopts(),
        tags = tags + ["compilation_db_dep"],
        deps = deps + [
            "@envoy//test/test_common:test_version_linkstamp",
        ],
        **kargs
    )

# Envoy benchmark binaries should be specified with this function. bazel run
# these targets to measure performance.
def envoy_cc_benchmark_binary(
        name,
        deps = [],
        repository = "",
        **kargs):
    envoy_cc_test_binary(
        name,
        deps = deps + [repository + "//test/benchmark:main"],
        repository = repository,
        **kargs
    )

# Tests to validate that Envoy benchmarks run successfully should be specified
# with this function. Not for actual performance measurements: iterations and
# expensive benchmarks will be skipped in the interest of execution time.
def envoy_benchmark_test(
        name,
        benchmark_binary,
        data = [],
        tags = [],
        **kargs):
    native.sh_test(
        name = name,
        srcs = ["//bazel:test_for_benchmark_wrapper.sh"],
        data = [":" + benchmark_binary] + data,
        args = ["%s/%s" % (native.package_name(), benchmark_binary)],
        tags = tags + ["nocoverage"],
        **kargs
    )

# Envoy Python test binaries should be specified with this function.
def envoy_py_test_binary( name, external_deps = [], deps = [], **kargs): py_binary( name = name, deps = deps + [envoy_external_dep_path(dep) for dep in external_deps], **kargs ) # Envoy C++ mock targets should be specified with this function. def envoy_cc_mock(name, **kargs): envoy_cc_test_library(name = name, **kargs) # Envoy shell tests that need to be included in coverage run should be specified with this function. def envoy_sh_test( name, srcs = [], data = [], coverage = True, cc_binary = [], tags = [], **kargs): if coverage: if cc_binary == []: fail("cc_binary is required for coverage-enabled test.") test_runner_cc = name + "_test_runner.cc" native.genrule( name = name + "_gen_test_runner", srcs = srcs, outs = [test_runner_cc], cmd = "$(location //bazel:gen_sh_test_runner.sh) $(SRCS) >> $@", tools = ["//bazel:gen_sh_test_runner.sh"], ) envoy_cc_test( name = name, srcs = [test_runner_cc], data = srcs + data + cc_binary, tags = tags, deps = ["//test/test_common:environment_lib"] + cc_binary, **kargs ) else: native.sh_test( name = name, srcs = ["//bazel:sh_test_wrapper.sh"], data = srcs + data + cc_binary, args = srcs, tags = tags + ["nocoverage"], **kargs ) ================================================ FILE: bazel/external/BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 # Use a wrapper cc_library with an empty source source file to force # compilation of other cc_library targets that only list *.a sources. 
cc_library( name = "all_external", srcs = [":empty.cc"], defines = ["OPENTRACING_STATIC"], # TODO: external/io_opentracing_cpp/BUILD.bazel:19:1: Executing genrule # @io_opentracing_cpp//:generate_version_h failed - needs porting tags = ["skip_on_windows"], deps = [ "@com_github_datadog_dd_opentracing_cpp//:dd_opentracing_cpp", "@com_google_googletest//:gtest", "@com_lightstep_tracer_cpp//:lightstep_tracer", "@io_opentracing_cpp//:opentracing", ], ) genrule( name = "empty_cc", outs = ["empty.cc"], cmd = "touch \"$(@D)/empty.cc\"", visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/boringssl_fips.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") load(":genrule_cmd.bzl", "genrule_cmd") licenses(["notice"]) # Apache 2 cc_library( name = "crypto", srcs = [ "crypto/libcrypto.a", ], hdrs = glob(["boringssl/include/openssl/*.h"]), defines = ["BORINGSSL_FIPS"], includes = ["boringssl/include"], visibility = ["//visibility:public"], ) cc_library( name = "ssl", srcs = [ "ssl/libssl.a", ], hdrs = glob(["boringssl/include/openssl/*.h"]), includes = ["boringssl/include"], visibility = ["//visibility:public"], deps = [":crypto"], ) genrule( name = "build", srcs = glob(["boringssl/**"]), outs = [ "crypto/libcrypto.a", "ssl/libssl.a", ], cmd = genrule_cmd("@envoy//bazel/external:boringssl_fips.genrule_cmd"), ) ================================================ FILE: bazel/external/boringssl_fips.genrule_cmd ================================================ #!/bin/bash set -e # BoringSSL build as described in the Security Policy for BoringCrypto module (2020-07-02): # https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp3678.pdf # This works only on Linux-x86_64. if [[ `uname` != "Linux" || `uname -m` != "x86_64" ]]; then echo "ERROR: BoringSSL FIPS is currently supported only on Linux-x86_64." 
exit 1 fi # Bazel magic. ROOT=$$(dirname $(rootpath boringssl/BUILDING.md))/.. pushd $$ROOT # Build tools requirements: # - Clang compiler version 7.0.1 (https://releases.llvm.org/download.html) # - Go programming language version 1.12.7 (https://golang.org/dl/) # - Ninja build system version 1.9.0 (https://github.com/ninja-build/ninja/releases) # Override $$PATH for build tools, to avoid picking up anything else. export PATH="$$(dirname `which cmake`):/usr/bin:/bin" # Clang 7.0.1 VERSION=7.0.1 SHA256=02ad925add5b2b934d64c3dd5cbd1b2002258059f7d962993ba7f16524c3089c PLATFORM="x86_64-linux-gnu-ubuntu-16.04" curl -sLO https://releases.llvm.org/"$$VERSION"/clang+llvm-"$$VERSION"-"$$PLATFORM".tar.xz \ && echo "$$SHA256" clang+llvm-"$$VERSION"-"$$PLATFORM".tar.xz | sha256sum --check tar xf clang+llvm-"$$VERSION"-"$$PLATFORM".tar.xz export HOME="$$PWD" printf "set(CMAKE_C_COMPILER \"clang\")\nset(CMAKE_CXX_COMPILER \"clang++\")\n" > $${HOME}/toolchain export PATH="$$PWD/clang+llvm-$$VERSION-$$PLATFORM/bin:$$PATH" if [[ `clang --version | head -1 | awk '{print $$3}'` != "$$VERSION" ]]; then echo "ERROR: Clang version doesn't match." exit 1 fi # Go 1.12.7 VERSION=1.12.7 SHA256=66d83bfb5a9ede000e33c6579a91a29e6b101829ad41fffb5c5bb6c900e109d9 PLATFORM="linux-amd64" curl -sLO https://dl.google.com/go/go"$$VERSION"."$$PLATFORM".tar.gz \ && echo "$$SHA256" go"$$VERSION"."$$PLATFORM".tar.gz | sha256sum --check tar xf go"$$VERSION"."$$PLATFORM".tar.gz export GOPATH="$$PWD/gopath" export GOROOT="$$PWD/go" export PATH="$$GOPATH/bin:$$GOROOT/bin:$$PATH" if [[ `go version | awk '{print $$3}'` != "go$$VERSION" ]]; then echo "ERROR: Go version doesn't match." 
exit 1 fi # Ninja 1.9.0 VERSION=1.9.0 SHA256=1b1235f2b0b4df55ac6d80bbe681ea3639c9d2c505c7ff2159a3daf63d196305 PLATFORM="linux" curl -sLO https://github.com/ninja-build/ninja/releases/download/v"$$VERSION"/ninja-"$$PLATFORM".zip \ && echo "$$SHA256" ninja-"$$PLATFORM".zip | sha256sum --check unzip -o ninja-"$$PLATFORM".zip export PATH="$$PWD:$$PATH" if [[ `ninja --version` != "$$VERSION" ]]; then echo "ERROR: Ninja version doesn't match." exit 1 fi # Clean after previous build. rm -rf boringssl/build # Build BoringSSL. cd boringssl mkdir build && cd build && cmake -GNinja -DCMAKE_TOOLCHAIN_FILE=$${HOME}/toolchain -DFIPS=1 -DCMAKE_BUILD_TYPE=Release .. ninja ninja run_tests # Verify correctness of the FIPS build. if [[ `tool/bssl isfips` != "1" ]]; then echo "ERROR: BoringSSL tool didn't report FIPS build." exit 1 fi # Move compiled libraries to the expected destinations. popd mv $$ROOT/boringssl/build/crypto/libcrypto.a $(execpath crypto/libcrypto.a) mv $$ROOT/boringssl/build/ssl/libssl.a $(execpath ssl/libssl.a) ================================================ FILE: bazel/external/boringssl_fips.patch ================================================ # Fix FIPS build (from BoringSSL commit 4ca15d5dcbe6e8051a4654df7c971ea8307abfe0). # # The modulewrapper is not a part of the FIPS module, so it can be patched without # concern about breaking the FIPS validation. --- boringssl/util/fipstools/acvp/modulewrapper/modulewrapper.cc +++ boringssl/util/fipstools/acvp/modulewrapper/modulewrapper.cc @@ -12,9 +12,11 @@ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include #include #include +#include #include #include #include ================================================ FILE: bazel/external/cargo/BUILD ================================================ """ cargo-raze workspace build file. DO NOT EDIT! 
Replaced on runs of cargo-raze """ package(default_visibility = ["//visibility:public"]) licenses([ "notice", # See individual crates for specific licenses ]) alias( name = "log", actual = "@raze__log__0_4_11//:log", tags = ["cargo-raze"], ) alias( name = "proxy_wasm", actual = "@raze__proxy_wasm__0_1_2//:proxy_wasm", tags = ["cargo-raze"], ) ================================================ FILE: bazel/external/cargo/remote/BUILD ================================================ ================================================ FILE: bazel/external/cargo/remote/ahash-0.3.8.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "notice", # MIT from expression "MIT OR Apache-2.0" ]) # Unsupported target "ahash" with type "bench" omitted rust_library( name = "ahash", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2018", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.3.8", deps = [ ], ) # Unsupported target "bench" with type "test" omitted # Unsupported target "map" with type "bench" omitted # Unsupported target "map_tests" with type "test" omitted # Unsupported target "nopanic" with type "test" omitted ================================================ FILE: bazel/external/cargo/remote/autocfg-1.0.0.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. 
# # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "notice", # Apache-2.0 from expression "Apache-2.0 OR MIT" ]) rust_library( name = "autocfg", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2015", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "1.0.0", deps = [ ], ) # Unsupported target "integers" with type "example" omitted # Unsupported target "paths" with type "example" omitted # Unsupported target "rustflags" with type "test" omitted # Unsupported target "traits" with type "example" omitted # Unsupported target "versions" with type "example" omitted ================================================ FILE: bazel/external/cargo/remote/cfg-if-0.1.10.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "notice", # MIT from expression "MIT OR Apache-2.0" ]) rust_library( name = "cfg_if", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2018", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.1.10", deps = [ ], ) # Unsupported target "xcrate" with type "test" omitted ================================================ FILE: bazel/external/cargo/remote/hashbrown-0.7.2.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! 
Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "notice", # Apache-2.0 from expression "Apache-2.0 OR MIT" ]) # Unsupported target "bench" with type "bench" omitted # Unsupported target "build-script-build" with type "custom-build" omitted rust_library( name = "hashbrown", srcs = glob(["**/*.rs"]), crate_features = [ "ahash", "inline-more", ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2018", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.7.2", deps = [ "@raze__ahash__0_3_8//:ahash", ], ) # Unsupported target "hasher" with type "test" omitted # Unsupported target "rayon" with type "test" omitted # Unsupported target "serde" with type "test" omitted # Unsupported target "set" with type "test" omitted ================================================ FILE: bazel/external/cargo/remote/libc-0.2.74.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. 
"//visibility:public", ]) licenses([ "notice", # MIT from expression "MIT OR Apache-2.0" ]) # Unsupported target "build-script-build" with type "custom-build" omitted # Unsupported target "const_fn" with type "test" omitted rust_library( name = "libc", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2015", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.2.74", deps = [ ], ) ================================================ FILE: bazel/external/cargo/remote/log-0.4.11.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "notice", # MIT from expression "MIT OR Apache-2.0" ]) # Unsupported target "build-script-build" with type "custom-build" omitted # Unsupported target "filters" with type "test" omitted rust_library( name = "log", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2015", rustc_flags = [ "--cap-lints=allow", "--cfg=atomic_cas", ], tags = ["cargo-raze"], version = "0.4.11", deps = [ "@raze__cfg_if__0_1_10//:cfg_if", ], ) # Unsupported target "macros" with type "test" omitted ================================================ FILE: bazel/external/cargo/remote/memory_units-0.4.0.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. 
# # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "reciprocal", # MPL-2.0 from expression "MPL-2.0" ]) rust_library( name = "memory_units", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2015", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.4.0", deps = [ ], ) ================================================ FILE: bazel/external/cargo/remote/proxy-wasm-0.1.2.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "notice", # Apache-2.0 from expression "Apache-2.0" ]) # Unsupported target "hello_world" with type "example" omitted # Unsupported target "http_auth_random" with type "example" omitted # Unsupported target "http_body" with type "example" omitted # Unsupported target "http_headers" with type "example" omitted rust_library( name = "proxy_wasm", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2018", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.1.2", deps = [ "@raze__hashbrown__0_7_2//:hashbrown", "@raze__log__0_4_11//:log", "@raze__wee_alloc__0_4_5//:wee_alloc", ], ) ================================================ FILE: bazel/external/cargo/remote/wee_alloc-0.4.5.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! 
Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "reciprocal", # MPL-2.0 from expression "MPL-2.0" ]) # Unsupported target "build-script-build" with type "custom-build" omitted rust_library( name = "wee_alloc", srcs = glob(["**/*.rs"]), crate_features = [ "default", "size_classes", ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2015", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.4.5", deps = [ "@raze__cfg_if__0_1_10//:cfg_if", "@raze__libc__0_2_74//:libc", "@raze__memory_units__0_4_0//:memory_units", ], ) ================================================ FILE: bazel/external/cargo/remote/winapi-0.3.9.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. 
"//visibility:public", ]) licenses([ "notice", # MIT from expression "MIT OR Apache-2.0" ]) # Unsupported target "build-script-build" with type "custom-build" omitted rust_library( name = "winapi", srcs = glob(["**/*.rs"]), crate_features = [ "memoryapi", "synchapi", "winbase", ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2015", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.3.9", deps = [ ], ) ================================================ FILE: bazel/external/cargo/remote/winapi-i686-pc-windows-gnu-0.4.0.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. "//visibility:public", ]) licenses([ "notice", # MIT from expression "MIT OR Apache-2.0" ]) # Unsupported target "build-script-build" with type "custom-build" omitted rust_library( name = "winapi_i686_pc_windows_gnu", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2015", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.4.0", deps = [ ], ) ================================================ FILE: bazel/external/cargo/remote/winapi-x86_64-pc-windows-gnu-0.4.0.BUILD ================================================ """ cargo-raze crate build file. DO NOT EDIT! Replaced on runs of cargo-raze """ load( "@io_bazel_rules_rust//rust:rust.bzl", "rust_library", ) package(default_visibility = [ # Public for visibility by "@raze__crate__version//" targets. # # Prefer access through "//bazel/external/cargo", which limits external # visibility to explicit Cargo.toml dependencies. 
"//visibility:public", ]) licenses([ "notice", # MIT from expression "MIT OR Apache-2.0" ]) # Unsupported target "build-script-build" with type "custom-build" omitted rust_library( name = "winapi_x86_64_pc_windows_gnu", srcs = glob(["**/*.rs"]), crate_features = [ ], crate_root = "src/lib.rs", crate_type = "lib", edition = "2015", rustc_flags = [ "--cap-lints=allow", ], tags = ["cargo-raze"], version = "0.4.0", deps = [ ], ) ================================================ FILE: bazel/external/compiler_rt.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "fuzzed_data_provider", hdrs = ["include/fuzzer/FuzzedDataProvider.h"], strip_include_prefix = "include", visibility = ["//visibility:public"], ) libfuzzer_copts = [ "-fno-sanitize=address,thread,undefined", "-fsanitize-coverage=0", "-O3", ] cc_library( name = "libfuzzer_main", srcs = ["lib/fuzzer/FuzzerMain.cpp"], copts = libfuzzer_copts, visibility = ["//visibility:public"], deps = [":libfuzzer_no_main"], alwayslink = True, ) cc_library( name = "libfuzzer_no_main", srcs = glob( ["lib/fuzzer/Fuzzer*.cpp"], exclude = ["lib/fuzzer/FuzzerMain.cpp"], ), hdrs = glob([ "lib/fuzzer/Fuzzer*.h", "lib/fuzzer/Fuzzer*.def", ]), copts = libfuzzer_copts, visibility = ["//visibility:public"], alwayslink = True, ) ================================================ FILE: bazel/external/fmtlib.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "fmtlib", hdrs = glob([ "include/fmt/*.h", ]), defines = ["FMT_HEADER_ONLY"], includes = ["include"], visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/http-parser.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "http_parser", srcs = [ 
"http_parser.c", "http_parser.h", ], hdrs = ["http_parser.h"], # This compiler flag is set to an arbtitrarily high number so # as to effectively disables the http_parser header limit, as # we do our own checks in the conn manager and codec. copts = ["-DHTTP_MAX_HEADER_SIZE=0x2000000"], includes = ["."], visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/kafka_int32.patch ================================================ --- DescribeGroupsResponse.json 2020-03-25 16:12:16.373302600 -0400 +++ DescribeGroupsResponse.json 2020-03-25 16:11:16.184156200 -0400 @@ -63,7 +63,7 @@ { "name": "MemberAssignment", "type": "bytes", "versions": "0+", "about": "The current assignment provided by the group leader." } ]}, - { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "-2147483648", + { "name": "AuthorizedOperations", "type": "int32", "versions": "3+", "default": "INT32_MIN", "about": "32-bit bitfield to represent authorized operations for this group." } ]} ] --- MetadataResponse.json 2020-03-25 15:53:36.319161000 -0400 +++ MetadataResponse.json 2020-03-25 15:54:11.510400000 -0400 @@ -81,10 +81,10 @@ { "name": "OfflineReplicas", "type": "[]int32", "versions": "5+", "ignorable": true, "about": "The set of offline replicas of this partition." } ]}, - { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", "default": "-2147483648", + { "name": "TopicAuthorizedOperations", "type": "int32", "versions": "8+", "default": "INT32_MIN", "about": "32-bit bitfield to represent authorized operations for this topic." } ]}, - { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", "default": "-2147483648", + { "name": "ClusterAuthorizedOperations", "type": "int32", "versions": "8+", "default": "INT32_MIN", "about": "32-bit bitfield to represent authorized operations for this cluster." 
} ] } ================================================ FILE: bazel/external/libcircllhist.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "libcircllhist", srcs = ["src/circllhist.c"], hdrs = [ "src/circllhist.h", ], copts = select({ "@envoy//bazel:windows_x86_64": ["-DWIN32"], "//conditions:default": [], }), includes = ["src"], visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/libprotobuf_mutator.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "libprotobuf_mutator", srcs = glob( [ "src/**/*.cc", "src/**/*.h", "port/protobuf.h", ], exclude = ["**/*_test.cc"], ), hdrs = ["src/libfuzzer/libfuzzer_macro.h"], include_prefix = "libprotobuf_mutator", includes = ["."], visibility = ["//visibility:public"], deps = ["//external:protobuf"], ) ================================================ FILE: bazel/external/proxy_wasm_cpp_host.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") load( "@envoy//bazel:envoy_build_system.bzl", "envoy_select_wasm_all_v8_wavm_none", "envoy_select_wasm_v8", "envoy_select_wasm_wavm", ) licenses(["notice"]) # Apache 2 package(default_visibility = ["//visibility:public"]) cc_library( name = "include", hdrs = glob(["include/proxy-wasm/**/*.h"]), deps = [ "@proxy_wasm_cpp_sdk//:common_lib", ], ) cc_library( name = "lib", # Note that the select cannot appear in the glob. 
srcs = envoy_select_wasm_all_v8_wavm_none( glob( [ "src/**/*.h", "src/**/*.cc", ], ), glob( [ "src/**/*.h", "src/**/*.cc", ], exclude = ["src/wavm/*"], ), glob( [ "src/**/*.h", "src/**/*.cc", ], exclude = ["src/v8/*"], ), glob( [ "src/**/*.h", "src/**/*.cc", ], exclude = [ "src/wavm/*", "src/v8/*", ], ), ), copts = envoy_select_wasm_wavm([ '-DWAVM_API=""', "-Wno-non-virtual-dtor", "-Wno-old-style-cast", ]), deps = [ ":include", "//external:abseil_flat_hash_map", "//external:abseil_optional", "//external:abseil_strings", "//external:protobuf", "//external:ssl", "//external:zlib", "@proxy_wasm_cpp_sdk//:api_lib", "@proxy_wasm_cpp_sdk//:common_lib", ] + envoy_select_wasm_wavm([ "@envoy//bazel/foreign_cc:wavm", ]) + envoy_select_wasm_v8([ "//external:wee8", ]), ) ================================================ FILE: bazel/external/quiche.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_proto_library") load("@rules_proto//proto:defs.bzl", "proto_library") load(":genrule_cmd.bzl", "genrule_cmd") load( "@envoy//bazel:envoy_build_system.bzl", "envoy_cc_library", "envoy_cc_test", "envoy_cc_test_library", ) licenses(["notice"]) # Apache 2 # QUICHE is Google's implementation of QUIC and related protocols. It is the # same code used in Chromium and Google's servers, but packaged in a form that # is intended to be easier to incorporate into third-party projects. # # QUICHE code falls into three groups: # 1. Platform-independent code. Most QUICHE code is in this category. # 2. APIs and type aliases to platform-dependent code/types, referenced by code # in group 1. This group is called the "Platform API". # 3. Definitions of types declared in group 2. This group is called the # "Platform impl", and must be provided by the codebase that embeds QUICHE. # # Concretely, header files in group 2 (the Platform API) #include header and # source files in group 3 (the Platform impl). 
Unfortunately, QUICHE does not # yet provide a built-in way to customize this dependency, e.g. to override the # directory or namespace in which Platform impl types are defined. Hence the # gross hacks in quiche.genrule_cmd, invoked from here to tweak QUICHE source # files into a form usable by Envoy. # # The mechanics of this will change as QUICHE evolves, supplies its own Bazel # buildfiles, and provides a built-in way to override platform impl directory # location. However, the end result (QUICHE files placed under # quiche/{http2,quic,spdy}/, with the Envoy-specific implementation of the # QUICHE platform APIs in //source/extensions/quic_listeners/quiche/platform/, # should remain largely the same. src_files = glob([ "**/*.h", "**/*.c", "**/*.cc", "**/*.inc", "**/*.proto", ]) genrule( name = "quiche_files", srcs = src_files, outs = ["quiche/" + f for f in src_files], cmd = genrule_cmd("@envoy//bazel/external:quiche.genrule_cmd"), visibility = ["//visibility:private"], ) # These options are only used to suppress errors in brought-in QUICHE tests. # Use #pragma GCC diagnostic ignored in integration code to suppress these errors. quiche_common_copts = [ "-Wno-unused-function", # quic_inlined_frame.h uses offsetof() to optimize memory usage in frames. "-Wno-invalid-offsetof", "-Wno-range-loop-analysis", ] quiche_copts = select({ # Ignore unguarded #pragma GCC statements in QUICHE sources "@envoy//bazel:windows_x86_64": ["-wd4068"], # Remove these after upstream fix. 
"@envoy//bazel:gcc_build": [ "-Wno-sign-compare", ] + quiche_common_copts, "//conditions:default": quiche_common_copts, }) test_suite( name = "ci_tests", tests = [ "http2_platform_api_test", "quic_platform_api_test", "quiche_common_test", "spdy_platform_api_test", ], ) envoy_cc_test_library( name = "http2_test_tools_random", srcs = ["quiche/http2/test_tools/http2_random.cc"], hdrs = ["quiche/http2/test_tools/http2_random.h"], external_deps = ["ssl"], repository = "@envoy", deps = [":http2_platform"], ) envoy_cc_library( name = "http2_platform", hdrs = [ "quiche/http2/platform/api/http2_bug_tracker.h", "quiche/http2/platform/api/http2_containers.h", "quiche/http2/platform/api/http2_estimate_memory_usage.h", "quiche/http2/platform/api/http2_flag_utils.h", "quiche/http2/platform/api/http2_flags.h", "quiche/http2/platform/api/http2_logging.h", "quiche/http2/platform/api/http2_macros.h", "quiche/http2/platform/api/http2_string_utils.h", # TODO: uncomment the following files as implementations are added. 
# "quiche/http2/platform/api/http2_test_helpers.h", ], repository = "@envoy", visibility = ["//visibility:public"], deps = [ ":quiche_common_platform", "@envoy//source/extensions/quic_listeners/quiche/platform:http2_platform_impl_lib", ], ) envoy_cc_library( name = "http2_constants_lib", srcs = ["quiche/http2/http2_constants.cc"], hdrs = ["quiche/http2/http2_constants.h"], copts = quiche_copts, repository = "@envoy", deps = [":http2_platform"], ) envoy_cc_library( name = "http2_structures_lib", srcs = ["quiche/http2/http2_structures.cc"], hdrs = ["quiche/http2/http2_structures.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_decoder_decode_buffer_lib", srcs = ["quiche/http2/decoder/decode_buffer.cc"], hdrs = ["quiche/http2/decoder/decode_buffer.h"], copts = quiche_copts, repository = "@envoy", deps = [":http2_platform"], ) envoy_cc_library( name = "http2_decoder_decode_http2_structures_lib", srcs = ["quiche/http2/decoder/decode_http2_structures.cc"], hdrs = ["quiche/http2/decoder/decode_http2_structures.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_decode_status_lib", srcs = ["quiche/http2/decoder/decode_status.cc"], hdrs = ["quiche/http2/decoder/decode_status.h"], copts = quiche_copts, repository = "@envoy", deps = [":http2_platform"], ) envoy_cc_library( name = "http2_decoder_frame_decoder_state_lib", srcs = ["quiche/http2/decoder/frame_decoder_state.cc"], hdrs = ["quiche/http2/decoder/frame_decoder_state.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_structure_decoder_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = 
"http2_decoder_frame_decoder_lib", srcs = ["quiche/http2/decoder/http2_frame_decoder.cc"], hdrs = [ "quiche/http2/decoder/frame_decoder_state.h", "quiche/http2/decoder/http2_frame_decoder.h", ], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_decoder_payload_decoders_altsvc_payload_decoder_lib", ":http2_decoder_payload_decoders_continuation_payload_decoder_lib", ":http2_decoder_payload_decoders_data_payload_decoder_lib", ":http2_decoder_payload_decoders_goaway_payload_decoder_lib", ":http2_decoder_payload_decoders_headers_payload_decoder_lib", ":http2_decoder_payload_decoders_ping_payload_decoder_lib", ":http2_decoder_payload_decoders_priority_payload_decoder_lib", ":http2_decoder_payload_decoders_push_promise_payload_decoder_lib", ":http2_decoder_payload_decoders_rst_stream_payload_decoder_lib", ":http2_decoder_payload_decoders_settings_payload_decoder_lib", ":http2_decoder_payload_decoders_unknown_payload_decoder_lib", ":http2_decoder_payload_decoders_window_update_payload_decoder_lib", ":http2_decoder_structure_decoder_lib", ":http2_hpack_varint_hpack_varint_decoder_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_frame_decoder_listener_lib", srcs = ["quiche/http2/decoder/http2_frame_decoder_listener.cc"], hdrs = ["quiche/http2/decoder/http2_frame_decoder_listener.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_altsvc_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/altsvc_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/altsvc_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", 
":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_continuation_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/continuation_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/continuation_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_data_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/data_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/data_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_goaway_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/goaway_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/goaway_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_headers_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/headers_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/headers_payload_decoder.h"], copts 
= quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_ping_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/ping_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/ping_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_priority_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/priority_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/priority_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_push_promise_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/push_promise_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/push_promise_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_rst_stream_payload_decoder_lib", srcs = 
["quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/rst_stream_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_settings_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/settings_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/settings_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_unknown_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/unknown_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/unknown_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_payload_decoders_window_update_payload_decoder_lib", srcs = ["quiche/http2/decoder/payload_decoders/window_update_payload_decoder.cc"], hdrs = ["quiche/http2/decoder/payload_decoders/window_update_payload_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_http2_structures_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_listener_lib", 
":http2_decoder_frame_decoder_state_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_decoder_structure_decoder_lib", srcs = ["quiche/http2/decoder/http2_structure_decoder.cc"], hdrs = ["quiche/http2/decoder/http2_structure_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_http2_structures_lib", ":http2_decoder_decode_status_lib", ":http2_platform", ":http2_structures_lib", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_block_decoder_lib", srcs = ["quiche/http2/hpack/decoder/hpack_block_decoder.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_block_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_hpack_decoder_hpack_decoding_error_lib", ":http2_hpack_decoder_hpack_entry_decoder_lib", ":http2_hpack_decoder_hpack_entry_decoder_listener_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_decoder_lib", srcs = ["quiche/http2/hpack/decoder/hpack_decoder.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_hpack_decoder_hpack_block_decoder_lib", ":http2_hpack_decoder_hpack_decoder_listener_lib", ":http2_hpack_decoder_hpack_decoder_state_lib", ":http2_hpack_decoder_hpack_decoder_tables_lib", ":http2_hpack_decoder_hpack_decoding_error_lib", ":http2_hpack_decoder_hpack_whole_entry_buffer_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_decoder_listener_lib", srcs = ["quiche/http2/hpack/decoder/hpack_decoder_listener.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_decoder_listener.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_hpack_hpack_constants_lib", ":http2_hpack_hpack_string_lib", ":http2_platform", ], ) envoy_cc_library( name = 
"http2_hpack_decoder_hpack_decoder_state_lib", srcs = ["quiche/http2/hpack/decoder/hpack_decoder_state.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_decoder_state.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_hpack_decoder_hpack_decoder_listener_lib", ":http2_hpack_decoder_hpack_decoder_string_buffer_lib", ":http2_hpack_decoder_hpack_decoder_tables_lib", ":http2_hpack_decoder_hpack_decoding_error_lib", ":http2_hpack_decoder_hpack_whole_entry_listener_lib", ":http2_hpack_hpack_constants_lib", ":http2_hpack_hpack_string_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_decoder_string_buffer_lib", srcs = ["quiche/http2/hpack/decoder/hpack_decoder_string_buffer.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_decoder_string_buffer.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_hpack_huffman_hpack_huffman_decoder_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_decoder_tables_lib", srcs = ["quiche/http2/hpack/decoder/hpack_decoder_tables.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_decoder_tables.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_hpack_hpack_constants_lib", ":http2_hpack_hpack_static_table_entries_lib", ":http2_hpack_hpack_string_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_decoding_error_lib", srcs = ["quiche/http2/hpack/decoder/hpack_decoding_error.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_decoding_error.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":quiche_common_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_entry_decoder_lib", srcs = ["quiche/http2/hpack/decoder/hpack_entry_decoder.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_entry_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", 
":http2_hpack_decoder_hpack_decoding_error_lib", ":http2_hpack_decoder_hpack_entry_decoder_listener_lib", ":http2_hpack_decoder_hpack_entry_type_decoder_lib", ":http2_hpack_decoder_hpack_string_decoder_lib", ":http2_hpack_hpack_constants_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_entry_decoder_listener_lib", srcs = ["quiche/http2/hpack/decoder/hpack_entry_decoder_listener.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_entry_decoder_listener.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_hpack_hpack_constants_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_entry_type_decoder_lib", srcs = ["quiche/http2/hpack/decoder/hpack_entry_type_decoder.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_entry_type_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_hpack_hpack_constants_lib", ":http2_hpack_varint_hpack_varint_decoder_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_string_decoder_lib", srcs = ["quiche/http2/hpack/decoder/hpack_string_decoder.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_string_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_hpack_varint_hpack_varint_decoder_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_string_decoder_listener_lib", srcs = ["quiche/http2/hpack/decoder/hpack_string_decoder_listener.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_string_decoder_listener.h"], copts = quiche_copts, repository = "@envoy", deps = [":http2_platform"], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_whole_entry_buffer_lib", srcs = ["quiche/http2/hpack/decoder/hpack_whole_entry_buffer.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_whole_entry_buffer.h"], copts = quiche_copts, repository = "@envoy", 
deps = [ ":http2_hpack_decoder_hpack_decoder_string_buffer_lib", ":http2_hpack_decoder_hpack_decoding_error_lib", ":http2_hpack_decoder_hpack_entry_decoder_listener_lib", ":http2_hpack_decoder_hpack_whole_entry_listener_lib", ":http2_hpack_hpack_constants_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_decoder_hpack_whole_entry_listener_lib", srcs = ["quiche/http2/hpack/decoder/hpack_whole_entry_listener.cc"], hdrs = ["quiche/http2/hpack/decoder/hpack_whole_entry_listener.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_hpack_decoder_hpack_decoder_string_buffer_lib", ":http2_hpack_decoder_hpack_decoding_error_lib", ":http2_hpack_hpack_constants_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_huffman_hpack_huffman_decoder_lib", srcs = ["quiche/http2/hpack/huffman/hpack_huffman_decoder.cc"], hdrs = ["quiche/http2/hpack/huffman/hpack_huffman_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_platform", ":quiche_common_platform", ], ) envoy_cc_library( name = "http2_hpack_huffman_hpack_huffman_encoder_lib", srcs = ["quiche/http2/hpack/huffman/hpack_huffman_encoder.cc"], hdrs = ["quiche/http2/hpack/huffman/hpack_huffman_encoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_hpack_huffman_huffman_spec_tables_lib", ":http2_platform", ":quiche_common_platform", ], ) envoy_cc_library( name = "http2_hpack_huffman_huffman_spec_tables_lib", srcs = ["quiche/http2/hpack/huffman/huffman_spec_tables.cc"], hdrs = ["quiche/http2/hpack/huffman/huffman_spec_tables.h"], copts = quiche_copts, repository = "@envoy", ) envoy_cc_library( name = "http2_hpack_hpack_constants_lib", srcs = ["quiche/http2/hpack/http2_hpack_constants.cc"], hdrs = ["quiche/http2/hpack/http2_hpack_constants.h"], copts = quiche_copts, repository = "@envoy", deps = [":http2_platform"], ) envoy_cc_library( name = "http2_hpack_hpack_static_table_entries_lib", hdrs = 
["quiche/http2/hpack/hpack_static_table_entries.inc"], repository = "@envoy", ) envoy_cc_library( name = "http2_hpack_hpack_string_lib", srcs = ["quiche/http2/hpack/hpack_string.cc"], hdrs = ["quiche/http2/hpack/hpack_string.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_platform", ":quiche_common_platform", ], ) envoy_cc_library( name = "http2_hpack_varint_hpack_varint_decoder_lib", srcs = ["quiche/http2/hpack/varint/hpack_varint_decoder.cc"], hdrs = ["quiche/http2/hpack/varint/hpack_varint_decoder.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_platform", ], ) envoy_cc_library( name = "http2_hpack_varint_hpack_varint_encoder_lib", srcs = ["quiche/http2/hpack/varint/hpack_varint_encoder.cc"], hdrs = ["quiche/http2/hpack/varint/hpack_varint_encoder.h"], copts = quiche_copts, repository = "@envoy", deps = [":http2_platform"], ) envoy_cc_library( name = "spdy_platform", hdrs = [ "quiche/spdy/platform/api/spdy_bug_tracker.h", "quiche/spdy/platform/api/spdy_containers.h", "quiche/spdy/platform/api/spdy_endianness_util.h", "quiche/spdy/platform/api/spdy_estimate_memory_usage.h", "quiche/spdy/platform/api/spdy_flags.h", "quiche/spdy/platform/api/spdy_logging.h", "quiche/spdy/platform/api/spdy_macros.h", "quiche/spdy/platform/api/spdy_mem_slice.h", "quiche/spdy/platform/api/spdy_string_utils.h", ], repository = "@envoy", visibility = ["//visibility:public"], deps = [ ":quiche_common_lib", "@envoy//source/extensions/quic_listeners/quiche/platform:spdy_platform_impl_lib", ], ) envoy_cc_library( name = "spdy_simple_arena_lib", srcs = ["quiche/spdy/core/spdy_simple_arena.cc"], hdrs = ["quiche/spdy/core/spdy_simple_arena.h"], repository = "@envoy", visibility = ["//visibility:public"], deps = [":spdy_platform"], ) envoy_cc_test_library( name = "spdy_platform_test_helpers", hdrs = ["quiche/spdy/platform/api/spdy_test_helpers.h"], repository = "@envoy", deps = 
["@envoy//test/extensions/quic_listeners/quiche/platform:spdy_platform_test_helpers_impl_lib"], ) envoy_cc_library( name = "spdy_core_alt_svc_wire_format_lib", srcs = ["quiche/spdy/core/spdy_alt_svc_wire_format.cc"], hdrs = ["quiche/spdy/core/spdy_alt_svc_wire_format.h"], copts = quiche_copts, repository = "@envoy", visibility = ["//visibility:public"], deps = [":spdy_platform"], ) envoy_cc_library( name = "spdy_core_fifo_write_scheduler_lib", hdrs = ["quiche/spdy/core/fifo_write_scheduler.h"], repository = "@envoy", deps = [ ":spdy_core_write_scheduler_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_framer_lib", srcs = [ "quiche/spdy/core/spdy_frame_builder.cc", "quiche/spdy/core/spdy_framer.cc", ], hdrs = [ "quiche/spdy/core/spdy_frame_builder.h", "quiche/spdy/core/spdy_framer.h", ], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_platform", ":spdy_core_alt_svc_wire_format_lib", ":spdy_core_frame_reader_lib", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", ":spdy_core_hpack_hpack_lib", ":spdy_core_protocol_lib", ":spdy_core_zero_copy_output_buffer_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_frame_reader_lib", srcs = ["quiche/spdy/core/spdy_frame_reader.cc"], hdrs = ["quiche/spdy/core/spdy_frame_reader.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":spdy_core_protocol_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_header_block_lib", srcs = ["quiche/spdy/core/spdy_header_block.cc"], hdrs = ["quiche/spdy/core/spdy_header_block.h"], copts = quiche_copts, repository = "@envoy", visibility = ["//visibility:public"], deps = [ ":spdy_core_header_storage_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_header_storage_lib", srcs = ["quiche/spdy/core/spdy_header_storage.cc"], hdrs = ["quiche/spdy/core/spdy_header_storage.h"], copts = quiche_copts, repository = "@envoy", deps = [ "spdy_simple_arena_lib", ":spdy_platform", ], ) envoy_cc_library( name = 
"spdy_core_headers_handler_interface_lib", hdrs = ["quiche/spdy/core/spdy_headers_handler_interface.h"], copts = quiche_copts, repository = "@envoy", visibility = ["//visibility:public"], deps = [":spdy_platform"], ) envoy_cc_library( name = "spdy_core_http2_deframer_lib", srcs = ["quiche/spdy/core/http2_frame_decoder_adapter.cc"], hdrs = ["quiche/spdy/core/http2_frame_decoder_adapter.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_constants_lib", ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_decoder_frame_decoder_lib", ":http2_decoder_frame_decoder_listener_lib", ":http2_platform", ":http2_structures_lib", ":spdy_core_alt_svc_wire_format_lib", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", ":spdy_core_hpack_hpack_decoder_adapter_lib", ":spdy_core_hpack_hpack_lib", ":spdy_core_protocol_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_lifo_write_scheduler_lib", hdrs = ["quiche/spdy/core/lifo_write_scheduler.h"], repository = "@envoy", deps = [ ":spdy_core_write_scheduler_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_intrusive_list_lib", hdrs = ["quiche/spdy/core/spdy_intrusive_list.h"], repository = "@envoy", ) envoy_cc_library( name = "spdy_core_http2_priority_write_scheduler_lib", hdrs = ["quiche/spdy/core/http2_priority_write_scheduler.h"], repository = "@envoy", deps = [ ":spdy_core_intrusive_list_lib", ":spdy_core_protocol_lib", ":spdy_core_write_scheduler_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_hpack_hpack_lib", srcs = [ "quiche/spdy/core/hpack/hpack_constants.cc", "quiche/spdy/core/hpack/hpack_encoder.cc", "quiche/spdy/core/hpack/hpack_entry.cc", "quiche/spdy/core/hpack/hpack_header_table.cc", "quiche/spdy/core/hpack/hpack_huffman_table.cc", "quiche/spdy/core/hpack/hpack_output_stream.cc", "quiche/spdy/core/hpack/hpack_static_table.cc", ], hdrs = [ "quiche/spdy/core/hpack/hpack_constants.h", 
"quiche/spdy/core/hpack/hpack_encoder.h", "quiche/spdy/core/hpack/hpack_entry.h", "quiche/spdy/core/hpack/hpack_header_table.h", "quiche/spdy/core/hpack/hpack_huffman_table.h", "quiche/spdy/core/hpack/hpack_output_stream.h", "quiche/spdy/core/hpack/hpack_static_table.h", ], copts = quiche_copts, repository = "@envoy", deps = [ ":spdy_core_protocol_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_hpack_hpack_decoder_adapter_lib", srcs = ["quiche/spdy/core/hpack/hpack_decoder_adapter.cc"], hdrs = ["quiche/spdy/core/hpack/hpack_decoder_adapter.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", ":http2_hpack_decoder_hpack_decoder_lib", ":http2_hpack_decoder_hpack_decoder_listener_lib", ":http2_hpack_decoder_hpack_decoder_tables_lib", ":http2_hpack_hpack_constants_lib", ":http2_hpack_hpack_string_lib", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", ":spdy_core_hpack_hpack_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_priority_write_scheduler_lib", srcs = ["quiche/spdy/core/priority_write_scheduler.h"], repository = "@envoy", deps = [ ":http2_platform", ":spdy_core_protocol_lib", ":spdy_core_write_scheduler_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_protocol_lib", srcs = ["quiche/spdy/core/spdy_protocol.cc"], hdrs = [ "quiche/spdy/core/spdy_bitmasks.h", "quiche/spdy/core/spdy_protocol.h", ], copts = quiche_copts, repository = "@envoy", visibility = ["//visibility:public"], deps = [ ":spdy_core_alt_svc_wire_format_lib", ":spdy_core_header_block_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_write_scheduler_lib", hdrs = ["quiche/spdy/core/write_scheduler.h"], repository = "@envoy", deps = [ ":spdy_core_protocol_lib", ":spdy_platform", ], ) envoy_cc_test_library( name = "spdy_core_test_utils_lib", srcs = ["quiche/spdy/core/spdy_test_utils.cc"], hdrs = ["quiche/spdy/core/spdy_test_utils.h"], 
copts = quiche_copts, repository = "@envoy", deps = [ ":quiche_common_test_tools_test_utils_lib", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", ":spdy_core_protocol_lib", ":spdy_platform", ], ) envoy_cc_library( name = "spdy_core_zero_copy_output_buffer_lib", hdrs = ["quiche/spdy/core/zero_copy_output_buffer.h"], copts = quiche_copts, repository = "@envoy", ) envoy_cc_library( name = "quic_platform", srcs = [ "quiche/quic/platform/api/quic_file_utils.cc", "quiche/quic/platform/api/quic_hostname_utils.cc", "quiche/quic/platform/api/quic_mutex.cc", ], hdrs = [ "quiche/quic/platform/api/quic_cert_utils.h", "quiche/quic/platform/api/quic_file_utils.h", "quiche/quic/platform/api/quic_hostname_utils.h", "quiche/quic/platform/api/quic_mutex.h", "quiche/quic/platform/api/quic_pcc_sender.h", ], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_time_lib", ":quic_platform_base", "@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_impl_lib", ], ) envoy_cc_library( name = "quic_platform_base", hdrs = [ "quiche/quic/platform/api/quic_aligned.h", "quiche/quic/platform/api/quic_bug_tracker.h", "quiche/quic/platform/api/quic_client_stats.h", "quiche/quic/platform/api/quic_containers.h", "quiche/quic/platform/api/quic_error_code_wrappers.h", "quiche/quic/platform/api/quic_estimate_memory_usage.h", "quiche/quic/platform/api/quic_exported_stats.h", "quiche/quic/platform/api/quic_fallthrough.h", "quiche/quic/platform/api/quic_flag_utils.h", "quiche/quic/platform/api/quic_flags.h", "quiche/quic/platform/api/quic_iovec.h", "quiche/quic/platform/api/quic_logging.h", "quiche/quic/platform/api/quic_macros.h", "quiche/quic/platform/api/quic_map_util.h", "quiche/quic/platform/api/quic_mem_slice.h", "quiche/quic/platform/api/quic_prefetch.h", "quiche/quic/platform/api/quic_ptr_util.h", "quiche/quic/platform/api/quic_reference_counted.h", "quiche/quic/platform/api/quic_server_stats.h", 
"quiche/quic/platform/api/quic_stack_trace.h", "quiche/quic/platform/api/quic_stream_buffer_allocator.h", "quiche/quic/platform/api/quic_string_utils.h", "quiche/quic/platform/api/quic_uint128.h", # TODO: uncomment the following files as implementations are added. # "quiche/quic/platform/api/quic_fuzzed_data_provider.h", # "quiche/quic/platform/api/quic_test_loopback.h", ], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_export", ":quiche_common_lib", "@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_base_impl_lib", ], ) envoy_cc_library( name = "quic_platform_export", hdrs = ["quiche/quic/platform/api/quic_export.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_export_impl_lib"], ) envoy_cc_test_library( name = "quic_platform_epoll_lib", hdrs = ["quiche/quic/platform/api/quic_epoll.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_epoll_impl_lib"], ) envoy_cc_test_library( name = "quic_platform_expect_bug", hdrs = ["quiche/quic/platform/api/quic_expect_bug.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_expect_bug_impl_lib"], ) envoy_cc_library( name = "quic_platform_ip_address_family", hdrs = ["quiche/quic/platform/api/quic_ip_address_family.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], ) envoy_cc_library( name = "quic_platform_ip_address", srcs = ["quiche/quic/platform/api/quic_ip_address.cc"], hdrs = ["quiche/quic/platform/api/quic_ip_address.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_base", ":quic_platform_export", ":quic_platform_ip_address_family", ], ) envoy_cc_test_library( name = 
"quic_platform_mock_log", hdrs = ["quiche/quic/platform/api/quic_mock_log.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_mock_log_impl_lib"], ) envoy_cc_test_library( name = "quic_platform_port_utils", hdrs = ["quiche/quic/platform/api/quic_port_utils.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_port_utils_impl_lib"], ) envoy_cc_library( name = "quic_platform_udp_socket", hdrs = select({ "@envoy//bazel:linux": ["quiche/quic/platform/api/quic_udp_socket_platform_api.h"], "//conditions:default": [], }), repository = "@envoy", tags = ["nofips"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_udp_socket_impl_lib"], ) envoy_cc_test_library( name = "quic_platform_sleep", hdrs = ["quiche/quic/platform/api/quic_sleep.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_sleep_impl_lib"], ) envoy_cc_library( name = "quic_platform_socket_address", srcs = ["quiche/quic/platform/api/quic_socket_address.cc"], hdrs = ["quiche/quic/platform/api/quic_socket_address.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_export", ":quic_platform_ip_address", ], ) envoy_cc_test_library( name = "quic_platform_test", hdrs = ["quiche/quic/platform/api/quic_test.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_impl_lib"], ) envoy_cc_test_library( name = "quic_platform_test_output", hdrs = ["quiche/quic/platform/api/quic_test_output.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_output_impl_lib"], ) envoy_cc_test_library( name = "quic_platform_system_event_loop", hdrs = 
["quiche/quic/platform/api/quic_system_event_loop.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_system_event_loop_impl_lib"], ) envoy_cc_test_library( name = "quic_platform_thread", hdrs = ["quiche/quic/platform/api/quic_thread.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_thread_impl_lib"], ) envoy_cc_library( name = "quiche_common_platform_endian", hdrs = ["quiche/common/platform/api/quiche_endian.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quiche_common_platform_export", "@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_endian_impl_lib", ], ) envoy_cc_library( name = "quiche_common_platform_export", hdrs = ["quiche/common/platform/api/quiche_export.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_export_impl_lib"], ) envoy_cc_test_library( name = "quiche_common_test_tools_test_utils_lib", srcs = ["quiche/common/test_tools/quiche_test_utils.cc"], hdrs = [ "quiche/common/platform/api/quiche_test.h", "quiche/common/test_tools/quiche_test_utils.h", ], repository = "@envoy", tags = ["nofips"], deps = [ ":quiche_common_platform", "@envoy//test/extensions/quic_listeners/quiche/platform:quiche_common_platform_test_impl_lib", ], ) #TODO(danzh) Figure out why using envoy_proto_library() fails. 
proto_library( name = "quic_core_proto_cached_network_parameters_proto", srcs = ["quiche/quic/core/proto/cached_network_parameters.proto"], ) cc_proto_library( name = "quic_core_proto_cached_network_parameters_proto_cc", deps = [":quic_core_proto_cached_network_parameters_proto"], ) envoy_cc_library( name = "quic_core_proto_cached_network_parameters_proto_header", hdrs = ["quiche/quic/core/proto/cached_network_parameters_proto.h"], repository = "@envoy", tags = ["nofips"], deps = [":quic_core_proto_cached_network_parameters_proto_cc"], ) proto_library( name = "quic_core_proto_source_address_token_proto", srcs = ["quiche/quic/core/proto/source_address_token.proto"], deps = [":quic_core_proto_cached_network_parameters_proto"], ) cc_proto_library( name = "quic_core_proto_source_address_token_proto_cc", deps = [":quic_core_proto_source_address_token_proto"], ) envoy_cc_library( name = "quic_core_proto_source_address_token_proto_header", hdrs = ["quiche/quic/core/proto/source_address_token_proto.h"], repository = "@envoy", tags = ["nofips"], deps = [":quic_core_proto_source_address_token_proto_cc"], ) proto_library( name = "quic_core_proto_crypto_server_config_proto", srcs = ["quiche/quic/core/proto/crypto_server_config.proto"], ) cc_proto_library( name = "quic_core_proto_crypto_server_config_proto_cc", deps = [":quic_core_proto_crypto_server_config_proto"], ) envoy_cc_library( name = "quic_core_proto_crypto_server_config_proto_header", hdrs = ["quiche/quic/core/proto/crypto_server_config_proto.h"], repository = "@envoy", tags = ["nofips"], deps = [":quic_core_proto_crypto_server_config_proto_cc"], ) envoy_cc_library( name = "quic_core_ack_listener_interface_lib", srcs = ["quiche/quic/core/quic_ack_listener_interface.cc"], hdrs = ["quiche/quic/core/quic_ack_listener_interface.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_time_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) 
envoy_cc_library(
    name = "quic_core_alarm_interface_lib",
    srcs = ["quiche/quic/core/quic_alarm.cc"],
    hdrs = ["quiche/quic/core/quic_alarm.h"],
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_arena_scoped_ptr_lib",
        ":quic_core_time_lib",
    ],
)

envoy_cc_library(
    name = "quic_core_alarm_factory_interface_lib",
    hdrs = ["quiche/quic/core/quic_alarm_factory.h"],
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_alarm_interface_lib",
        ":quic_core_one_block_arena_lib",
    ],
)

envoy_cc_library(
    name = "quic_core_bandwidth_lib",
    srcs = ["quiche/quic/core/quic_bandwidth.cc"],
    hdrs = ["quiche/quic/core/quic_bandwidth.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_constants_lib",
        ":quic_core_time_lib",
        ":quic_core_types_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_batch_writer_batch_writer_buffer_lib",
    srcs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_batch_writer_buffer.cc",
        ],
        "//conditions:default": [],
    }),
    hdrs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_batch_writer_buffer.h",
        ],
        "//conditions:default": [],
    }),
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_circular_deque_lib",
        ":quic_core_linux_socket_utils_lib",
        ":quic_core_packet_writer_interface_lib",
        ":quic_platform",
    ],
)

envoy_cc_library(
    name = "quic_core_batch_writer_batch_writer_base_lib",
    srcs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_batch_writer_base.cc",
        ],
        "//conditions:default": [],
    }),
    hdrs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_batch_writer_base.h",
        ],
        "//conditions:default": [],
    }),
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_batch_writer_batch_writer_buffer_lib",
        ":quic_core_packet_writer_interface_lib",
        ":quic_core_types_lib",
        ":quic_platform",
    ],
)

envoy_cc_test_library(
    name = "quic_core_batch_writer_batch_writer_test_lib",
    hdrs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_batch_writer_test.h",
        ],
        "//conditions:default": [],
    }),
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_batch_writer_batch_writer_base_lib",
        ":quic_core_udp_socket_lib",
        ":quic_platform_test",
    ],
)

envoy_cc_library(
    name = "quic_core_batch_writer_gso_batch_writer_lib",
    srcs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_gso_batch_writer.cc",
        ],
        "//conditions:default": [],
    }),
    hdrs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_gso_batch_writer.h",
        ],
        "//conditions:default": [],
    }),
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_batch_writer_batch_writer_base_lib",
        ":quic_core_linux_socket_utils_lib",
        ":quic_platform",
    ],
)

envoy_cc_library(
    name = "quic_core_batch_writer_sendmmsg_batch_writer_lib",
    srcs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.cc",
        ],
        "//conditions:default": [],
    }),
    hdrs = select({
        "@envoy//bazel:linux": [
            "quiche/quic/core/batch_writer/quic_sendmmsg_batch_writer.h",
        ],
        "//conditions:default": [],
    }),
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_batch_writer_batch_writer_base_lib",
        ":quic_core_linux_socket_utils_lib",
    ],
)

envoy_cc_library(
    name = "quic_core_blocked_writer_interface_lib",
    hdrs = ["quiche/quic/core/quic_blocked_writer_interface.h"],
    repository = "@envoy",
    tags = ["nofips"],
    deps = [":quic_platform_export"],
)

envoy_cc_library(
    name = "quic_core_arena_scoped_ptr_lib",
    hdrs = ["quiche/quic/core/quic_arena_scoped_ptr.h"],
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [":quic_platform_base"],
)

envoy_cc_library(
    name = "quic_core_buffer_allocator_lib",
    srcs = [
        "quiche/quic/core/quic_buffer_allocator.cc",
        "quiche/quic/core/quic_simple_buffer_allocator.cc",
    ],
    hdrs = [
        "quiche/quic/core/quic_buffer_allocator.h",
        "quiche/quic/core/quic_simple_buffer_allocator.h",
    ],
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [":quic_platform_export"],
)

envoy_cc_library(
    name = "quic_core_clock_lib",
    srcs = ["quiche/quic/core/quic_clock.cc"],
    hdrs = ["quiche/quic/core/quic_clock.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_time_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_coalesced_packet_lib",
    srcs = ["quiche/quic/core/quic_coalesced_packet.cc"],
    hdrs = ["quiche/quic/core/quic_coalesced_packet.h"],
    copts = quiche_copts,
    repository = "@envoy",
    deps = [
        ":quic_core_packets_lib",
    ],
)

envoy_cc_library(
    name = "quic_core_config_lib",
    srcs = ["quiche/quic/core/quic_config.cc"],
    hdrs = ["quiche/quic/core/quic_config.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_constants_lib",
        ":quic_core_crypto_crypto_handshake_lib",
        ":quic_core_crypto_encryption_lib",
        ":quic_core_packets_lib",
        ":quic_core_socket_address_coder_lib",
        ":quic_core_time_lib",
        ":quic_core_utils_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_congestion_control_bandwidth_sampler_lib",
    srcs = ["quiche/quic/core/congestion_control/bandwidth_sampler.cc"],
    hdrs = ["quiche/quic/core/congestion_control/bandwidth_sampler.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_bandwidth_lib",
        ":quic_core_congestion_control_congestion_control_interface_lib",
        ":quic_core_congestion_control_windowed_filter_lib",
        ":quic_core_packet_number_indexed_queue_lib",
        ":quic_core_packets_lib",
        ":quic_core_time_lib",
        ":quic_core_types_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_congestion_control_bbr_lib",
    srcs = ["quiche/quic/core/congestion_control/bbr_sender.cc"],
    hdrs = ["quiche/quic/core/congestion_control/bbr_sender.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_bandwidth_lib",
        ":quic_core_congestion_control_bandwidth_sampler_lib",
        ":quic_core_congestion_control_congestion_control_interface_lib",
        ":quic_core_congestion_control_rtt_stats_lib",
        ":quic_core_congestion_control_windowed_filter_lib",
        ":quic_core_crypto_encryption_lib",
        ":quic_core_crypto_random_lib",
        ":quic_core_packets_lib",
        ":quic_core_time_lib",
        ":quic_core_unacked_packet_map_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_congestion_control_bbr2_lib",
    srcs = [
        "quiche/quic/core/congestion_control/bbr2_drain.cc",
        "quiche/quic/core/congestion_control/bbr2_misc.cc",
        "quiche/quic/core/congestion_control/bbr2_probe_bw.cc",
        "quiche/quic/core/congestion_control/bbr2_probe_rtt.cc",
        "quiche/quic/core/congestion_control/bbr2_sender.cc",
        "quiche/quic/core/congestion_control/bbr2_startup.cc",
    ],
    hdrs = [
        "quiche/quic/core/congestion_control/bbr2_drain.h",
        "quiche/quic/core/congestion_control/bbr2_misc.h",
        "quiche/quic/core/congestion_control/bbr2_probe_bw.h",
        "quiche/quic/core/congestion_control/bbr2_probe_rtt.h",
        "quiche/quic/core/congestion_control/bbr2_sender.h",
        "quiche/quic/core/congestion_control/bbr2_startup.h",
    ],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_bandwidth_lib",
        ":quic_core_congestion_control_bandwidth_sampler_lib",
        ":quic_core_congestion_control_bbr_lib",
        ":quic_core_congestion_control_congestion_control_interface_lib",
        ":quic_core_congestion_control_rtt_stats_lib",
        ":quic_core_congestion_control_windowed_filter_lib",
        ":quic_core_crypto_encryption_lib",
        ":quic_core_time_lib",
":quic_core_types_lib", ":quic_platform", ], ) envoy_cc_library( name = "quic_core_congestion_control_general_loss_algorithm_lib", srcs = ["quiche/quic/core/congestion_control/general_loss_algorithm.cc"], hdrs = ["quiche/quic/core/congestion_control/general_loss_algorithm.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_congestion_control_congestion_control_interface_lib", ":quic_core_congestion_control_rtt_stats_lib", ":quic_core_packets_lib", ":quic_core_time_lib", ":quic_core_unacked_packet_map_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_congestion_control_congestion_control_interface_lib", hdrs = [ "quiche/quic/core/congestion_control/loss_detection_interface.h", "quiche/quic/core/congestion_control/send_algorithm_interface.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_bandwidth_lib", ":quic_core_clock_lib", ":quic_core_config_lib", ":quic_core_connection_stats_lib", ":quic_core_crypto_random_lib", ":quic_core_packets_lib", ":quic_core_time_lib", ":quic_core_types_lib", ":quic_core_unacked_packet_map_lib", ":quic_platform", ], ) envoy_cc_library( name = "quic_core_congestion_control_congestion_control_lib", srcs = [ "quiche/quic/core/congestion_control/send_algorithm_interface.cc", ], hdrs = [ "quiche/quic/core/congestion_control/loss_detection_interface.h", "quiche/quic/core/congestion_control/send_algorithm_interface.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_bandwidth_lib", ":quic_core_config_lib", ":quic_core_congestion_control_bbr2_lib", ":quic_core_congestion_control_bbr_lib", ":quic_core_congestion_control_tcp_cubic_bytes_lib", ":quic_core_connection_stats_lib", ":quic_core_crypto_random_lib", ":quic_core_packets_lib", ":quic_core_time_lib", ":quic_core_types_lib", ":quic_core_unacked_packet_map_lib", ":quic_platform", ], ) envoy_cc_library( name = 
"quic_core_congestion_control_pacing_sender_lib", srcs = ["quiche/quic/core/congestion_control/pacing_sender.cc"], hdrs = ["quiche/quic/core/congestion_control/pacing_sender.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_bandwidth_lib", ":quic_core_config_lib", ":quic_core_congestion_control_congestion_control_interface_lib", ":quic_core_packets_lib", ":quic_core_time_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_congestion_control_rtt_stats_lib", srcs = ["quiche/quic/core/congestion_control/rtt_stats.cc"], hdrs = ["quiche/quic/core/congestion_control/rtt_stats.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_packets_lib", ":quic_core_time_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_congestion_control_tcp_cubic_helper", srcs = [ "quiche/quic/core/congestion_control/hybrid_slow_start.cc", "quiche/quic/core/congestion_control/prr_sender.cc", ], hdrs = [ "quiche/quic/core/congestion_control/hybrid_slow_start.h", "quiche/quic/core/congestion_control/prr_sender.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_bandwidth_lib", ":quic_core_packets_lib", ":quic_core_time_lib", ":quic_platform_base", ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_congestion_control_tcp_cubic_bytes_lib", srcs = [ "quiche/quic/core/congestion_control/cubic_bytes.cc", "quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.cc", ], hdrs = [ "quiche/quic/core/congestion_control/cubic_bytes.h", "quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_bandwidth_lib", ":quic_core_congestion_control_congestion_control_interface_lib", ":quic_core_congestion_control_rtt_stats_lib", ":quic_core_congestion_control_tcp_cubic_helper", ":quic_core_connection_stats_lib", ":quic_core_constants_lib", 
":quic_core_crypto_encryption_lib", ":quic_core_packets_lib", ":quic_core_time_lib", ":quic_platform", ], ) envoy_cc_library( name = "quic_core_congestion_control_uber_loss_algorithm_lib", srcs = ["quiche/quic/core/congestion_control/uber_loss_algorithm.cc"], hdrs = ["quiche/quic/core/congestion_control/uber_loss_algorithm.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [":quic_core_congestion_control_general_loss_algorithm_lib"], ) envoy_cc_library( name = "quic_core_congestion_control_windowed_filter_lib", hdrs = ["quiche/quic/core/congestion_control/windowed_filter.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [":quic_core_time_lib"], ) envoy_cc_library( name = "quic_core_connection_lib", srcs = ["quiche/quic/core/quic_connection.cc"], hdrs = ["quiche/quic/core/quic_connection.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_alarm_factory_interface_lib", ":quic_core_alarm_interface_lib", ":quic_core_bandwidth_lib", ":quic_core_blocked_writer_interface_lib", ":quic_core_config_lib", ":quic_core_connection_stats_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_crypto_encryption_lib", ":quic_core_framer_lib", ":quic_core_idle_network_detector_lib", ":quic_core_legacy_version_encapsulator_lib", ":quic_core_mtu_discovery_lib", ":quic_core_network_blackhole_detector_lib", ":quic_core_one_block_arena_lib", ":quic_core_packet_creator_lib", ":quic_core_packet_writer_interface_lib", ":quic_core_packets_lib", ":quic_core_proto_cached_network_parameters_proto_header", ":quic_core_sent_packet_manager_lib", ":quic_core_time_lib", ":quic_core_types_lib", ":quic_core_uber_received_packet_manager_lib", ":quic_core_utils_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_connection_stats_lib", srcs = ["quiche/quic/core/quic_connection_stats.cc"], hdrs = ["quiche/quic/core/quic_connection_stats.h"], copts = 
quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_bandwidth_lib", ":quic_core_packets_lib", ":quic_core_time_accumulator_lib", ":quic_core_time_lib", ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_constants_lib", srcs = ["quiche/quic/core/quic_constants.cc"], hdrs = ["quiche/quic/core/quic_constants.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_types_lib", ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_crypto_crypto_handshake_lib", srcs = [ "quiche/quic/core/crypto/cert_compressor.cc", "quiche/quic/core/crypto/channel_id.cc", "quiche/quic/core/crypto/common_cert_set.cc", "quiche/quic/core/crypto/crypto_framer.cc", "quiche/quic/core/crypto/crypto_handshake.cc", "quiche/quic/core/crypto/crypto_handshake_message.cc", "quiche/quic/core/crypto/crypto_secret_boxer.cc", "quiche/quic/core/crypto/crypto_utils.cc", "quiche/quic/core/crypto/curve25519_key_exchange.cc", "quiche/quic/core/crypto/key_exchange.cc", "quiche/quic/core/crypto/p256_key_exchange.cc", "quiche/quic/core/crypto/quic_compressed_certs_cache.cc", "quiche/quic/core/crypto/quic_crypto_client_config.cc", "quiche/quic/core/crypto/quic_crypto_server_config.cc", "quiche/quic/core/crypto/server_proof_verifier.h", "quiche/quic/core/crypto/transport_parameters.cc", ], hdrs = [ "quiche/quic/core/crypto/cert_compressor.h", "quiche/quic/core/crypto/channel_id.h", "quiche/quic/core/crypto/common_cert_set.h", "quiche/quic/core/crypto/crypto_framer.h", "quiche/quic/core/crypto/crypto_handshake.h", "quiche/quic/core/crypto/crypto_handshake_message.h", "quiche/quic/core/crypto/crypto_message_parser.h", "quiche/quic/core/crypto/crypto_secret_boxer.h", "quiche/quic/core/crypto/crypto_utils.h", "quiche/quic/core/crypto/curve25519_key_exchange.h", "quiche/quic/core/crypto/key_exchange.h", "quiche/quic/core/crypto/p256_key_exchange.h", "quiche/quic/core/crypto/proof_verifier.h", 
"quiche/quic/core/crypto/quic_compressed_certs_cache.h", "quiche/quic/core/crypto/quic_crypto_client_config.h", "quiche/quic/core/crypto/quic_crypto_server_config.h", "quiche/quic/core/crypto/transport_parameters.h", ], copts = quiche_copts, external_deps = [ "ssl", "zlib", ], repository = "@envoy", tags = [ "nofips", "pg3", ], textual_hdrs = [ "quiche/quic/core/crypto/common_cert_set_2.c", "quiche/quic/core/crypto/common_cert_set_2a.inc", "quiche/quic/core/crypto/common_cert_set_2b.inc", "quiche/quic/core/crypto/common_cert_set_3.c", "quiche/quic/core/crypto/common_cert_set_3a.inc", "quiche/quic/core/crypto/common_cert_set_3b.inc", ], visibility = ["//visibility:public"], deps = [ ":quic_core_clock_lib", ":quic_core_crypto_encryption_lib", ":quic_core_crypto_hkdf_lib", ":quic_core_crypto_proof_source_interface_lib", ":quic_core_crypto_random_lib", ":quic_core_crypto_tls_handshake_lib", ":quic_core_data_lib", ":quic_core_error_codes_lib", ":quic_core_lru_cache_lib", ":quic_core_packets_lib", ":quic_core_proto_cached_network_parameters_proto_header", ":quic_core_proto_crypto_server_config_proto_header", ":quic_core_proto_source_address_token_proto_header", ":quic_core_server_id_lib", ":quic_core_socket_address_coder_lib", ":quic_core_time_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_platform", ], ) envoy_cc_library( name = "quic_core_crypto_boring_utils_lib", hdrs = ["quiche/quic/core/crypto/boring_utils.h"], copts = quiche_copts, external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_platform_export", ":quiche_common_platform", ], ) envoy_cc_library( name = "quic_core_crypto_certificate_view_lib", srcs = ["quiche/quic/core/crypto/certificate_view.cc"], hdrs = ["quiche/quic/core/crypto/certificate_view.h"], copts = quiche_copts, external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_crypto_boring_utils_lib", 
":quic_core_types_lib", ":quic_platform", ":quic_platform_ip_address", ":quiche_common_platform", ], ) envoy_cc_library( name = "quic_core_crypto_encryption_lib", srcs = [ "quiche/quic/core/crypto/aead_base_decrypter.cc", "quiche/quic/core/crypto/aead_base_encrypter.cc", "quiche/quic/core/crypto/aes_128_gcm_12_decrypter.cc", "quiche/quic/core/crypto/aes_128_gcm_12_encrypter.cc", "quiche/quic/core/crypto/aes_128_gcm_decrypter.cc", "quiche/quic/core/crypto/aes_128_gcm_encrypter.cc", "quiche/quic/core/crypto/aes_256_gcm_decrypter.cc", "quiche/quic/core/crypto/aes_256_gcm_encrypter.cc", "quiche/quic/core/crypto/aes_base_decrypter.cc", "quiche/quic/core/crypto/aes_base_encrypter.cc", "quiche/quic/core/crypto/chacha20_poly1305_decrypter.cc", "quiche/quic/core/crypto/chacha20_poly1305_encrypter.cc", "quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter.cc", "quiche/quic/core/crypto/chacha20_poly1305_tls_encrypter.cc", "quiche/quic/core/crypto/chacha_base_decrypter.cc", "quiche/quic/core/crypto/chacha_base_encrypter.cc", "quiche/quic/core/crypto/null_decrypter.cc", "quiche/quic/core/crypto/null_encrypter.cc", "quiche/quic/core/crypto/quic_crypter.cc", "quiche/quic/core/crypto/quic_decrypter.cc", "quiche/quic/core/crypto/quic_encrypter.cc", ], hdrs = [ "quiche/quic/core/crypto/aead_base_decrypter.h", "quiche/quic/core/crypto/aead_base_encrypter.h", "quiche/quic/core/crypto/aes_128_gcm_12_decrypter.h", "quiche/quic/core/crypto/aes_128_gcm_12_encrypter.h", "quiche/quic/core/crypto/aes_128_gcm_decrypter.h", "quiche/quic/core/crypto/aes_128_gcm_encrypter.h", "quiche/quic/core/crypto/aes_256_gcm_decrypter.h", "quiche/quic/core/crypto/aes_256_gcm_encrypter.h", "quiche/quic/core/crypto/aes_base_decrypter.h", "quiche/quic/core/crypto/aes_base_encrypter.h", "quiche/quic/core/crypto/chacha20_poly1305_decrypter.h", "quiche/quic/core/crypto/chacha20_poly1305_encrypter.h", "quiche/quic/core/crypto/chacha20_poly1305_tls_decrypter.h", 
"quiche/quic/core/crypto/chacha20_poly1305_tls_encrypter.h", "quiche/quic/core/crypto/chacha_base_decrypter.h", "quiche/quic/core/crypto/chacha_base_encrypter.h", "quiche/quic/core/crypto/crypto_protocol.h", "quiche/quic/core/crypto/null_decrypter.h", "quiche/quic/core/crypto/null_encrypter.h", "quiche/quic/core/crypto/quic_crypter.h", "quiche/quic/core/crypto/quic_decrypter.h", "quiche/quic/core/crypto/quic_encrypter.h", ], copts = quiche_copts, external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_crypto_hkdf_lib", ":quic_core_data_lib", ":quic_core_packets_lib", ":quic_core_tag_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_crypto_hkdf_lib", srcs = ["quiche/quic/core/crypto/quic_hkdf.cc"], hdrs = ["quiche/quic/core/crypto/quic_hkdf.h"], external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_crypto_proof_source_interface_lib", srcs = [ "quiche/quic/core/crypto/proof_source.cc", "quiche/quic/core/crypto/quic_crypto_proof.cc", ], hdrs = [ "quiche/quic/core/crypto/proof_source.h", "quiche/quic/core/crypto/quic_crypto_proof.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_packets_lib", ":quic_core_versions_lib", ":quic_platform_base", ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_crypto_random_lib", srcs = ["quiche/quic/core/crypto/quic_random.cc"], hdrs = ["quiche/quic/core/crypto/quic_random.h"], copts = quiche_copts, external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [":quic_platform_base"], ) envoy_cc_library( name = "quic_core_crypto_tls_handshake_lib", srcs = [ "quiche/quic/core/crypto/tls_client_connection.cc", "quiche/quic/core/crypto/tls_connection.cc", "quiche/quic/core/crypto/tls_server_connection.cc", ], hdrs 
= [ "quiche/quic/core/crypto/tls_client_connection.h", "quiche/quic/core/crypto/tls_connection.h", "quiche/quic/core/crypto/tls_server_connection.h", ], copts = quiche_copts, external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_crypto_proof_source_interface_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_circular_deque_lib", hdrs = ["quiche/quic/core/quic_circular_deque.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_data_lib", srcs = [ "quiche/quic/core/quic_data_reader.cc", "quiche/quic/core/quic_data_writer.cc", ], hdrs = [ "quiche/quic/core/quic_data_reader.h", "quiche/quic/core/quic_data_writer.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_constants_lib", ":quic_core_crypto_random_lib", ":quic_core_packets_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_error_codes_lib", srcs = ["quiche/quic/core/quic_error_codes.cc"], hdrs = ["quiche/quic/core/quic_error_codes.h"], copts = quiche_copts, external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_base", ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_framer_lib", srcs = ["quiche/quic/core/quic_framer.cc"], hdrs = ["quiche/quic/core/quic_framer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_constants_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_crypto_encryption_lib", ":quic_core_crypto_random_lib", ":quic_core_data_lib", ":quic_core_packets_lib", ":quic_core_socket_address_coder_lib", ":quic_core_stream_frame_data_producer_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_platform_base", ], ) envoy_cc_library( name = 
"quic_core_frames_frames_lib", srcs = [ "quiche/quic/core/frames/quic_ack_frame.cc", "quiche/quic/core/frames/quic_ack_frequency_frame.cc", "quiche/quic/core/frames/quic_blocked_frame.cc", "quiche/quic/core/frames/quic_connection_close_frame.cc", "quiche/quic/core/frames/quic_crypto_frame.cc", "quiche/quic/core/frames/quic_frame.cc", "quiche/quic/core/frames/quic_goaway_frame.cc", "quiche/quic/core/frames/quic_handshake_done_frame.cc", "quiche/quic/core/frames/quic_max_streams_frame.cc", "quiche/quic/core/frames/quic_message_frame.cc", "quiche/quic/core/frames/quic_new_connection_id_frame.cc", "quiche/quic/core/frames/quic_new_token_frame.cc", "quiche/quic/core/frames/quic_padding_frame.cc", "quiche/quic/core/frames/quic_path_challenge_frame.cc", "quiche/quic/core/frames/quic_path_response_frame.cc", "quiche/quic/core/frames/quic_ping_frame.cc", "quiche/quic/core/frames/quic_retire_connection_id_frame.cc", "quiche/quic/core/frames/quic_rst_stream_frame.cc", "quiche/quic/core/frames/quic_stop_sending_frame.cc", "quiche/quic/core/frames/quic_stop_waiting_frame.cc", "quiche/quic/core/frames/quic_stream_frame.cc", "quiche/quic/core/frames/quic_streams_blocked_frame.cc", "quiche/quic/core/frames/quic_window_update_frame.cc", ], hdrs = [ "quiche/quic/core/frames/quic_ack_frame.h", "quiche/quic/core/frames/quic_ack_frequency_frame.h", "quiche/quic/core/frames/quic_blocked_frame.h", "quiche/quic/core/frames/quic_connection_close_frame.h", "quiche/quic/core/frames/quic_crypto_frame.h", "quiche/quic/core/frames/quic_frame.h", "quiche/quic/core/frames/quic_goaway_frame.h", "quiche/quic/core/frames/quic_handshake_done_frame.h", "quiche/quic/core/frames/quic_inlined_frame.h", "quiche/quic/core/frames/quic_max_streams_frame.h", "quiche/quic/core/frames/quic_message_frame.h", "quiche/quic/core/frames/quic_mtu_discovery_frame.h", "quiche/quic/core/frames/quic_new_connection_id_frame.h", "quiche/quic/core/frames/quic_new_token_frame.h", 
"quiche/quic/core/frames/quic_padding_frame.h", "quiche/quic/core/frames/quic_path_challenge_frame.h", "quiche/quic/core/frames/quic_path_response_frame.h", "quiche/quic/core/frames/quic_ping_frame.h", "quiche/quic/core/frames/quic_retire_connection_id_frame.h", "quiche/quic/core/frames/quic_rst_stream_frame.h", "quiche/quic/core/frames/quic_stop_sending_frame.h", "quiche/quic/core/frames/quic_stop_waiting_frame.h", "quiche/quic/core/frames/quic_stream_frame.h", "quiche/quic/core/frames/quic_streams_blocked_frame.h", "quiche/quic/core/frames/quic_window_update_frame.h", ], copts = quiche_copts, # TODO: Work around initializer in anonymous union in fastbuild build. # Remove this after upstream fix. defines = select({ "@envoy//bazel:windows_x86_64": ["QUIC_FRAME_DEBUG=0"], "//conditions:default": [], }), repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_buffer_allocator_lib", ":quic_core_constants_lib", ":quic_core_error_codes_lib", ":quic_core_interval_lib", ":quic_core_interval_set_lib", ":quic_core_types_lib", ":quic_core_versions_lib", ":quic_platform_base", ":quic_platform_mem_slice_span", ], ) envoy_cc_library( name = "quic_core_http_http_constants_lib", hdrs = ["quiche/quic/core/http/http_constants.h"], copts = quiche_copts, repository = "@envoy", deps = [":quic_core_types_lib"], ) envoy_cc_library( name = "quic_core_http_client_lib", srcs = [ "quiche/quic/core/http/quic_client_promised_info.cc", "quiche/quic/core/http/quic_client_push_promise_index.cc", "quiche/quic/core/http/quic_spdy_client_session.cc", "quiche/quic/core/http/quic_spdy_client_session_base.cc", "quiche/quic/core/http/quic_spdy_client_stream.cc", ], hdrs = [ "quiche/quic/core/http/quic_client_promised_info.h", "quiche/quic/core/http/quic_client_push_promise_index.h", "quiche/quic/core/http/quic_spdy_client_session.h", "quiche/quic/core/http/quic_spdy_client_session_base.h", "quiche/quic/core/http/quic_spdy_client_stream.h", ], copts = 
quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_alarm_interface_lib", ":quic_core_crypto_encryption_lib", ":quic_core_http_spdy_session_lib", ":quic_core_packets_lib", ":quic_core_qpack_qpack_streams_lib", ":quic_core_server_id_lib", ":quic_core_session_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_platform_base", ":spdy_core_framer_lib", ":spdy_core_protocol_lib", "@envoy//source/extensions/quic_listeners/quiche:spdy_server_push_utils_for_envoy_lib", ], ) envoy_cc_library( name = "quic_core_http_header_list_lib", srcs = ["quiche/quic/core/http/quic_header_list.cc"], hdrs = ["quiche/quic/core/http/quic_header_list.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_circular_deque_lib", ":quic_core_packets_lib", ":quic_core_qpack_qpack_header_table_lib", ":quic_platform_base", ":spdy_core_header_block_lib", ":spdy_core_headers_handler_interface_lib", ":spdy_core_protocol_lib", ], ) envoy_cc_library( name = "quic_core_http_http_decoder_lib", srcs = ["quiche/quic/core/http/http_decoder.cc"], hdrs = ["quiche/quic/core/http/http_decoder.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_data_lib", ":quic_core_error_codes_lib", ":quic_core_http_http_frames_lib", ":quic_core_http_spdy_utils_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_http_http_encoder_lib", srcs = ["quiche/quic/core/http/http_encoder.cc"], hdrs = ["quiche/quic/core/http/http_encoder.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_data_lib", ":quic_core_error_codes_lib", ":quic_core_http_http_frames_lib", ":quic_core_http_spdy_utils_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_http_http_frames_lib", hdrs = ["quiche/quic/core/http/http_frames.h"], copts = quiche_copts, repository = "@envoy", tags = 
["nofips"], deps = [ ":quic_core_types_lib", ":quic_platform_base", ":spdy_core_framer_lib", ], ) envoy_cc_library( name = "quic_core_http_spdy_server_push_utils_header", hdrs = ["quiche/quic/core/http/spdy_server_push_utils.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_platform_base", ":spdy_core_header_block_lib", ], ) envoy_cc_library( name = "quic_core_http_spdy_session_lib", srcs = [ "quiche/quic/core/http/quic_headers_stream.cc", "quiche/quic/core/http/quic_receive_control_stream.cc", "quiche/quic/core/http/quic_send_control_stream.cc", "quiche/quic/core/http/quic_server_session_base.cc", "quiche/quic/core/http/quic_spdy_server_stream_base.cc", "quiche/quic/core/http/quic_spdy_session.cc", "quiche/quic/core/http/quic_spdy_stream.cc", ], hdrs = [ "quiche/quic/core/http/quic_headers_stream.h", "quiche/quic/core/http/quic_receive_control_stream.h", "quiche/quic/core/http/quic_send_control_stream.h", "quiche/quic/core/http/quic_server_session_base.h", "quiche/quic/core/http/quic_spdy_server_stream_base.h", "quiche/quic/core/http/quic_spdy_session.h", "quiche/quic/core/http/quic_spdy_stream.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_connection_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_error_codes_lib", ":quic_core_http_header_list_lib", ":quic_core_http_http_constants_lib", ":quic_core_http_http_decoder_lib", ":quic_core_http_http_encoder_lib", ":quic_core_http_spdy_stream_body_manager_lib", ":quic_core_http_spdy_utils_lib", ":quic_core_packets_lib", ":quic_core_proto_cached_network_parameters_proto_header", ":quic_core_qpack_qpack_decoded_headers_accumulator_lib", ":quic_core_qpack_qpack_decoder_lib", ":quic_core_qpack_qpack_decoder_stream_sender_lib", ":quic_core_qpack_qpack_encoder_lib", ":quic_core_qpack_qpack_encoder_stream_sender_lib", ":quic_core_qpack_qpack_streams_lib", 
":quic_core_session_lib", ":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_platform_base", ":quic_platform_mem_slice_storage", ":spdy_core_framer_lib", ":spdy_core_http2_deframer_lib", ":spdy_core_protocol_lib", ], ) envoy_cc_library( name = "quic_core_http_spdy_stream_body_manager_lib", srcs = ["quiche/quic/core/http/quic_spdy_stream_body_manager.cc"], hdrs = ["quiche/quic/core/http/quic_spdy_stream_body_manager.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_http_http_decoder_lib", ":quic_core_session_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_http_spdy_utils_lib", srcs = ["quiche/quic/core/http/spdy_utils.cc"], hdrs = ["quiche/quic/core/http/spdy_utils.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_http_header_list_lib", ":quic_core_http_http_constants_lib", ":quic_core_packets_lib", ":quic_platform_base", ":spdy_core_framer_lib", ":spdy_core_protocol_lib", ], ) envoy_cc_library( name = "quic_core_idle_network_detector_lib", srcs = ["quiche/quic/core/quic_idle_network_detector.cc"], hdrs = ["quiche/quic/core/quic_idle_network_detector.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_alarm_factory_interface_lib", ":quic_core_alarm_interface_lib", ":quic_core_constants_lib", ":quic_core_one_block_arena_lib", ":quic_core_time_lib", ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_interval_lib", hdrs = ["quiche/quic/core/quic_interval.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], ) envoy_cc_library( name = "quic_core_interval_deque_lib", hdrs = ["quiche/quic/core/quic_interval_deque.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_circular_deque_lib", ":quic_core_interval_lib", ":quic_core_types_lib", ":quic_platform", ], ) envoy_cc_library( name = "quic_core_interval_set_lib", hdrs = 
["quiche/quic/core/quic_interval_set.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_interval_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_lru_cache_lib", hdrs = ["quiche/quic/core/quic_lru_cache.h"], repository = "@envoy", tags = ["nofips"], deps = [":quic_platform_base"], ) envoy_cc_library( name = "quic_core_mtu_discovery_lib", srcs = ["quiche/quic/core/quic_mtu_discovery.cc"], hdrs = ["quiche/quic/core/quic_mtu_discovery.h"], copts = quiche_copts, repository = "@envoy", deps = [ ":quic_core_constants_lib", ], ) envoy_cc_library( name = "quic_core_one_block_arena_lib", srcs = ["quiche/quic/core/quic_one_block_arena.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_arena_scoped_ptr_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_syscall_wrapper_lib", srcs = select({ "@envoy//bazel:linux": ["quiche/quic/core/quic_syscall_wrapper.cc"], "//conditions:default": [], }), hdrs = select({ "@envoy//bazel:linux": ["quiche/quic/core/quic_syscall_wrapper.h"], "//conditions:default": [], }), copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_legacy_version_encapsulator_lib", srcs = [ "quiche/quic/core/quic_legacy_version_encapsulator.cc", ], hdrs = [ "quiche/quic/core/quic_legacy_version_encapsulator.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_crypto_crypto_handshake_lib", ":quic_core_crypto_encryption_lib", ":quic_core_packet_creator_lib", ":quic_core_packets_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_platform", ":quiche_common_platform", ], ) envoy_cc_library( name = "quic_core_linux_socket_utils_lib", srcs = select({ "@envoy//bazel:linux": ["quiche/quic/core/quic_linux_socket_utils.cc"], "//conditions:default": [], }), 
hdrs = select({ "@envoy//bazel:linux": ["quiche/quic/core/quic_linux_socket_utils.h"], "//conditions:default": [], }), copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_packet_writer_interface_lib", ":quic_core_syscall_wrapper_lib", ":quic_core_types_lib", ":quic_platform", ], ) envoy_cc_library( name = "quic_core_network_blackhole_detector_lib", srcs = ["quiche/quic/core/quic_network_blackhole_detector.cc"], hdrs = ["quiche/quic/core/quic_network_blackhole_detector.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_alarm_factory_interface_lib", ":quic_core_alarm_interface_lib", ":quic_core_constants_lib", ":quic_core_one_block_arena_lib", ":quic_core_time_lib", ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_packet_creator_lib", srcs = ["quiche/quic/core/quic_packet_creator.cc"], hdrs = ["quiche/quic/core/quic_packet_creator.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_circular_deque_lib", ":quic_core_coalesced_packet_lib", ":quic_core_constants_lib", ":quic_core_crypto_encryption_lib", ":quic_core_data_lib", ":quic_core_framer_lib", ":quic_core_packets_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_packet_number_indexed_queue_lib", hdrs = ["quiche/quic/core/packet_number_indexed_queue.h"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_circular_deque_lib", ":quic_core_constants_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_packet_writer_interface_lib", srcs = ["quiche/quic/core/quic_packet_writer_wrapper.cc"], hdrs = [ "quiche/quic/core/quic_packet_writer.h", "quiche/quic/core/quic_packet_writer_wrapper.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_packets_lib", 
":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_packets_lib", srcs = [ "quiche/quic/core/quic_packets.cc", "quiche/quic/core/quic_write_blocked_list.cc", ], hdrs = [ "quiche/quic/core/quic_packets.h", "quiche/quic/core/quic_write_blocked_list.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_ack_listener_interface_lib", ":quic_core_bandwidth_lib", ":quic_core_constants_lib", ":quic_core_error_codes_lib", ":quic_core_frames_frames_lib", ":quic_core_time_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_platform", ":quic_platform_socket_address", ":spdy_core_fifo_write_scheduler_lib", ":spdy_core_http2_priority_write_scheduler_lib", ":spdy_core_lifo_write_scheduler_lib", ":spdy_core_priority_write_scheduler_lib", ], ) envoy_cc_library( name = "quic_core_process_packet_interface_lib", hdrs = ["quiche/quic/core/quic_process_packet_interface.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_packets_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_qpack_blocking_manager_lib", srcs = ["quiche/quic/core/qpack/qpack_blocking_manager.cc"], hdrs = ["quiche/quic/core/qpack/qpack_blocking_manager.h"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_qpack_qpack_decoder_lib", srcs = ["quiche/quic/core/qpack/qpack_decoder.cc"], hdrs = ["quiche/quic/core/qpack/qpack_decoder.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_qpack_qpack_decoder_stream_sender_lib", ":quic_core_qpack_qpack_encoder_stream_receiver_lib", ":quic_core_qpack_qpack_header_table_lib", ":quic_core_qpack_qpack_progressive_decoder_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_qpack_qpack_encoder_lib", srcs = ["quiche/quic/core/qpack/qpack_encoder.cc"], hdrs = 
["quiche/quic/core/qpack/qpack_encoder.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_qpack_blocking_manager_lib", ":quic_core_qpack_qpack_decoder_stream_receiver_lib", ":quic_core_qpack_qpack_encoder_stream_sender_lib", ":quic_core_qpack_qpack_header_table_lib", ":quic_core_qpack_qpack_index_conversions_lib", ":quic_core_qpack_qpack_instruction_encoder_lib", ":quic_core_qpack_qpack_instructions_lib", ":quic_core_qpack_qpack_required_insert_count_lib", ":quic_core_qpack_value_splitting_header_list_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_qpack_qpack_header_table_lib", srcs = ["quiche/quic/core/qpack/qpack_header_table.cc"], hdrs = ["quiche/quic/core/qpack/qpack_header_table.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_qpack_qpack_static_table_lib", ":quic_platform_base", ":spdy_core_hpack_hpack_lib", ], ) envoy_cc_library( name = "quic_core_qpack_qpack_instruction_decoder_lib", srcs = ["quiche/quic/core/qpack/qpack_instruction_decoder.cc"], hdrs = ["quiche/quic/core/qpack/qpack_instruction_decoder.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":http2_hpack_huffman_hpack_huffman_decoder_lib", ":http2_hpack_varint_hpack_varint_decoder_lib", ":quic_core_qpack_qpack_instructions_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_qpack_qpack_instructions_lib", srcs = ["quiche/quic/core/qpack/qpack_instructions.cc"], hdrs = ["quiche/quic/core/qpack/qpack_instructions.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [":quic_platform_base"], ) envoy_cc_library( name = "quic_core_qpack_qpack_instruction_encoder_lib", srcs = ["quiche/quic/core/qpack/qpack_instruction_encoder.cc"], hdrs = ["quiche/quic/core/qpack/qpack_instruction_encoder.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ 
":http2_hpack_huffman_hpack_huffman_encoder_lib", ":http2_hpack_varint_hpack_varint_encoder_lib", ":quic_core_qpack_qpack_instructions_lib", ":quic_platform", ], ) envoy_cc_library( name = "quic_core_qpack_qpack_progressive_decoder_lib", srcs = ["quiche/quic/core/qpack/qpack_progressive_decoder.cc"], hdrs = ["quiche/quic/core/qpack/qpack_progressive_decoder.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_qpack_qpack_decoder_stream_sender_lib", ":quic_core_qpack_qpack_encoder_stream_receiver_lib", ":quic_core_qpack_qpack_header_table_lib", ":quic_core_qpack_qpack_index_conversions_lib", ":quic_core_qpack_qpack_instruction_decoder_lib", ":quic_core_qpack_qpack_instructions_lib", ":quic_core_qpack_qpack_required_insert_count_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_qpack_qpack_required_insert_count_lib", srcs = ["quiche/quic/core/qpack/qpack_required_insert_count.cc"], hdrs = ["quiche/quic/core/qpack/qpack_required_insert_count.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [":quic_platform_base"], ) envoy_cc_library( name = "quic_core_qpack_qpack_encoder_stream_sender_lib", srcs = ["quiche/quic/core/qpack/qpack_encoder_stream_sender.cc"], hdrs = ["quiche/quic/core/qpack/qpack_encoder_stream_sender.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_qpack_qpack_instruction_encoder_lib", ":quic_core_qpack_qpack_instructions_lib", ":quic_core_qpack_qpack_stream_sender_delegate_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_qpack_qpack_encoder_stream_receiver_lib", srcs = ["quiche/quic/core/qpack/qpack_encoder_stream_receiver.cc"], hdrs = ["quiche/quic/core/qpack/qpack_encoder_stream_receiver.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":http2_decoder_decode_buffer_lib", ":http2_decoder_decode_status_lib", 
":quic_core_qpack_qpack_instruction_decoder_lib",
":quic_core_qpack_qpack_instructions_lib",
":quic_core_qpack_qpack_stream_receiver_lib",
":quic_platform_base",
],
)

envoy_cc_library(
    name = "quic_core_qpack_qpack_decoder_stream_sender_lib",
    srcs = ["quiche/quic/core/qpack/qpack_decoder_stream_sender.cc"],
    hdrs = ["quiche/quic/core/qpack/qpack_decoder_stream_sender.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_qpack_qpack_instruction_encoder_lib",
        ":quic_core_qpack_qpack_instructions_lib",
        ":quic_core_qpack_qpack_stream_sender_delegate_lib",
        ":quic_core_types_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_qpack_qpack_decoder_stream_receiver_lib",
    srcs = ["quiche/quic/core/qpack/qpack_decoder_stream_receiver.cc"],
    hdrs = ["quiche/quic/core/qpack/qpack_decoder_stream_receiver.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":http2_decoder_decode_buffer_lib",
        ":http2_decoder_decode_status_lib",
        ":quic_core_qpack_qpack_instruction_decoder_lib",
        ":quic_core_qpack_qpack_instructions_lib",
        ":quic_core_qpack_qpack_stream_receiver_lib",
        ":quic_core_types_lib",
        ":quic_platform_base",
    ],
)

# NOTE(review): tags = ["nofips"] added for consistency — every other QUICHE
# target in this file carries it so QUICHE is excluded from FIPS builds.
envoy_cc_library(
    name = "quic_core_qpack_qpack_index_conversions_lib",
    srcs = ["quiche/quic/core/qpack/qpack_index_conversions.cc"],
    hdrs = ["quiche/quic/core/qpack/qpack_index_conversions.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_platform_base",
        ":quic_platform_export",
    ],
)

envoy_cc_library(
    name = "quic_core_qpack_qpack_static_table_lib",
    srcs = ["quiche/quic/core/qpack/qpack_static_table.cc"],
    hdrs = ["quiche/quic/core/qpack/qpack_static_table.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_platform_base",
        ":spdy_core_hpack_hpack_lib",
    ],
)

envoy_cc_library(
    name = "quic_core_qpack_qpack_stream_receiver_lib",
    hdrs = ["quiche/quic/core/qpack/qpack_stream_receiver.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [":quic_platform_base"],
)

# NOTE(review): tags = ["nofips"] added for consistency with sibling targets.
envoy_cc_library(
    name = "quic_core_qpack_qpack_streams_lib",
    srcs = [
        "quiche/quic/core/qpack/qpack_receive_stream.cc",
        "quiche/quic/core/qpack/qpack_send_stream.cc",
    ],
    hdrs = [
        "quiche/quic/core/qpack/qpack_receive_stream.h",
        "quiche/quic/core/qpack/qpack_send_stream.h",
    ],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_qpack_qpack_stream_receiver_lib",
        ":quic_core_qpack_qpack_stream_sender_delegate_lib",
        ":quic_core_session_lib",
    ],
)

envoy_cc_library(
    name = "quic_core_qpack_qpack_decoded_headers_accumulator_lib",
    srcs = ["quiche/quic/core/qpack/qpack_decoded_headers_accumulator.cc"],
    hdrs = ["quiche/quic/core/qpack/qpack_decoded_headers_accumulator.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_http_header_list_lib",
        ":quic_core_qpack_qpack_decoder_lib",
        ":quic_core_qpack_qpack_progressive_decoder_lib",
        ":quic_core_types_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_qpack_value_splitting_header_list_lib",
    srcs = ["quiche/quic/core/qpack/value_splitting_header_list.cc"],
    hdrs = ["quiche/quic/core/qpack/value_splitting_header_list.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_platform_base",
        ":spdy_core_header_block_lib",
    ],
)

envoy_cc_library(
    name = "quic_core_qpack_qpack_stream_sender_delegate_lib",
    hdrs = ["quiche/quic/core/qpack/qpack_stream_sender_delegate.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [":quic_platform_base"],
)

envoy_cc_library(
    name = "quic_core_received_packet_manager_lib",
    srcs = ["quiche/quic/core/quic_received_packet_manager.cc"],
    hdrs = ["quiche/quic/core/quic_received_packet_manager.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_config_lib",
        ":quic_core_congestion_control_rtt_stats_lib",
        ":quic_core_connection_stats_lib",
        ":quic_core_crypto_encryption_lib",
        ":quic_core_framer_lib",
":quic_core_packets_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_sent_packet_manager_lib", srcs = ["quiche/quic/core/quic_sent_packet_manager.cc"], hdrs = ["quiche/quic/core/quic_sent_packet_manager.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_congestion_control_congestion_control_lib", ":quic_core_congestion_control_general_loss_algorithm_lib", ":quic_core_congestion_control_pacing_sender_lib", ":quic_core_congestion_control_rtt_stats_lib", ":quic_core_congestion_control_uber_loss_algorithm_lib", ":quic_core_connection_stats_lib", ":quic_core_crypto_encryption_lib", ":quic_core_packets_lib", ":quic_core_proto_cached_network_parameters_proto_header", ":quic_core_sustained_bandwidth_recorder_lib", ":quic_core_transmission_info_lib", ":quic_core_types_lib", ":quic_core_unacked_packet_map_lib", ":quic_core_utils_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_server_id_lib", srcs = ["quiche/quic/core/quic_server_id.cc"], hdrs = ["quiche/quic/core/quic_server_id.h"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_server_lib", srcs = [ "quiche/quic/core/chlo_extractor.cc", "quiche/quic/core/quic_buffered_packet_store.cc", "quiche/quic/core/quic_dispatcher.cc", "quiche/quic/core/tls_chlo_extractor.cc", ], hdrs = [ "quiche/quic/core/chlo_extractor.h", "quiche/quic/core/quic_buffered_packet_store.h", "quiche/quic/core/quic_dispatcher.h", "quiche/quic/core/tls_chlo_extractor.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_alarm_factory_interface_lib", ":quic_core_alarm_interface_lib", ":quic_core_blocked_writer_interface_lib", ":quic_core_connection_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_crypto_encryption_lib", ":quic_core_crypto_random_lib", ":quic_core_framer_lib", ":quic_core_packets_lib", 
":quic_core_process_packet_interface_lib", ":quic_core_session_lib", ":quic_core_time_lib", ":quic_core_time_wait_list_manager_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_core_version_manager_lib", ":quic_platform", ], ) envoy_cc_library( name = "quic_core_session_lib", srcs = [ "quiche/quic/core/legacy_quic_stream_id_manager.cc", "quiche/quic/core/quic_control_frame_manager.cc", "quiche/quic/core/quic_crypto_client_handshaker.cc", "quiche/quic/core/quic_crypto_client_stream.cc", "quiche/quic/core/quic_crypto_handshaker.cc", "quiche/quic/core/quic_crypto_server_stream.cc", "quiche/quic/core/quic_crypto_server_stream_base.cc", "quiche/quic/core/quic_crypto_stream.cc", "quiche/quic/core/quic_datagram_queue.cc", "quiche/quic/core/quic_flow_controller.cc", "quiche/quic/core/quic_session.cc", "quiche/quic/core/quic_stream.cc", "quiche/quic/core/quic_stream_id_manager.cc", "quiche/quic/core/quic_stream_sequencer.cc", "quiche/quic/core/tls_client_handshaker.cc", "quiche/quic/core/tls_handshaker.cc", "quiche/quic/core/tls_server_handshaker.cc", "quiche/quic/core/uber_quic_stream_id_manager.cc", ], hdrs = [ "quiche/quic/core/handshaker_delegate_interface.h", "quiche/quic/core/legacy_quic_stream_id_manager.h", "quiche/quic/core/quic_control_frame_manager.h", "quiche/quic/core/quic_crypto_client_handshaker.h", "quiche/quic/core/quic_crypto_client_stream.h", "quiche/quic/core/quic_crypto_handshaker.h", "quiche/quic/core/quic_crypto_server_stream.h", "quiche/quic/core/quic_crypto_server_stream_base.h", "quiche/quic/core/quic_crypto_stream.h", "quiche/quic/core/quic_datagram_queue.h", "quiche/quic/core/quic_flow_controller.h", "quiche/quic/core/quic_session.h", "quiche/quic/core/quic_stream.h", "quiche/quic/core/quic_stream_id_manager.h", "quiche/quic/core/quic_stream_sequencer.h", "quiche/quic/core/stream_delegate_interface.h", "quiche/quic/core/tls_client_handshaker.h", "quiche/quic/core/tls_handshaker.h", "quiche/quic/core/tls_server_handshaker.h", 
"quiche/quic/core/uber_quic_stream_id_manager.h", ], copts = quiche_copts, external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_config_lib", ":quic_core_connection_lib", ":quic_core_constants_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_crypto_encryption_lib", ":quic_core_crypto_random_lib", ":quic_core_crypto_tls_handshake_lib", ":quic_core_frames_frames_lib", ":quic_core_packet_creator_lib", ":quic_core_packets_lib", ":quic_core_server_id_lib", ":quic_core_session_notifier_interface_lib", ":quic_core_stream_frame_data_producer_lib", ":quic_core_stream_send_buffer_lib", ":quic_core_stream_sequencer_buffer_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_core_versions_lib", ":quic_platform", ":quic_platform_mem_slice_span", ":spdy_core_protocol_lib", ], ) envoy_cc_library( name = "quic_core_session_notifier_interface_lib", hdrs = ["quiche/quic/core/session_notifier_interface.h"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_frames_frames_lib", ":quic_core_time_lib", ], ) envoy_cc_library( name = "quic_core_socket_address_coder_lib", srcs = ["quiche/quic/core/quic_socket_address_coder.cc"], hdrs = ["quiche/quic/core/quic_socket_address_coder.h"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_platform_base", ":quic_platform_socket_address", ":spdy_core_priority_write_scheduler_lib", ], ) envoy_cc_library( name = "quic_core_stream_frame_data_producer_lib", hdrs = ["quiche/quic/core/quic_stream_frame_data_producer.h"], repository = "@envoy", tags = ["nofips"], deps = [":quic_core_types_lib"], ) envoy_cc_library( name = "quic_core_stream_send_buffer_lib", srcs = ["quiche/quic/core/quic_stream_send_buffer.cc"], hdrs = ["quiche/quic/core/quic_stream_send_buffer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quic_core_circular_deque_lib", ":quic_core_data_lib", ":quic_core_frames_frames_lib", 
":quic_core_interval_deque_lib", ":quic_core_interval_lib", ":quic_core_interval_set_lib", ":quic_core_types_lib", ":quic_core_utils_lib", ":quic_platform_base", ":quic_platform_mem_slice_span", ], ) envoy_cc_library( name = "quic_core_stream_sequencer_buffer_lib", srcs = ["quiche/quic/core/quic_stream_sequencer_buffer.cc"], hdrs = ["quiche/quic/core/quic_stream_sequencer_buffer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_constants_lib", ":quic_core_interval_lib", ":quic_core_interval_set_lib", ":quic_core_packets_lib", ":quic_core_types_lib", ":quic_platform_base", ], ) envoy_cc_library( name = "quic_core_sustained_bandwidth_recorder_lib", srcs = ["quiche/quic/core/quic_sustained_bandwidth_recorder.cc"], hdrs = ["quiche/quic/core/quic_sustained_bandwidth_recorder.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_bandwidth_lib", ":quic_core_time_lib", ":quic_platform_base", ":quic_platform_export", ], ) envoy_cc_library( name = "quic_core_tag_lib", srcs = ["quiche/quic/core/quic_tag.cc"], hdrs = ["quiche/quic/core/quic_tag.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [":quic_platform_base"], ) envoy_cc_library( name = "quic_core_time_lib", srcs = ["quiche/quic/core/quic_time.cc"], hdrs = ["quiche/quic/core/quic_time.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [":quic_platform_base"], ) envoy_cc_library( name = "quic_core_time_accumulator_lib", hdrs = ["quiche/quic/core/quic_time_accumulator.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [], ) envoy_cc_library( name = "quic_core_time_wait_list_manager_lib", srcs = ["quiche/quic/core/quic_time_wait_list_manager.cc"], hdrs = ["quiche/quic/core/quic_time_wait_list_manager.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ 
":quic_core_blocked_writer_interface_lib",
":quic_core_crypto_encryption_lib",
":quic_core_framer_lib",
":quic_core_packet_writer_interface_lib",
":quic_core_packets_lib",
":quic_core_session_lib",
":quic_core_types_lib",
":quic_core_utils_lib",
":quic_platform",
],
)

envoy_cc_library(
    name = "quic_core_transmission_info_lib",
    srcs = ["quiche/quic/core/quic_transmission_info.cc"],
    hdrs = ["quiche/quic/core/quic_transmission_info.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_ack_listener_interface_lib",
        ":quic_core_frames_frames_lib",
        ":quic_core_types_lib",
        ":quic_platform_export",
    ],
)

envoy_cc_library(
    name = "quic_core_types_lib",
    srcs = [
        "quiche/quic/core/quic_connection_id.cc",
        "quiche/quic/core/quic_packet_number.cc",
        "quiche/quic/core/quic_types.cc",
    ],
    hdrs = [
        "quiche/quic/core/quic_connection_id.h",
        "quiche/quic/core/quic_packet_number.h",
        "quiche/quic/core/quic_types.h",
    ],
    copts = quiche_copts,
    external_deps = ["ssl"],
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_crypto_random_lib",
        ":quic_core_error_codes_lib",
        ":quic_core_time_lib",
        ":quic_platform_base",
        ":quiche_common_platform_endian",
    ],
)

envoy_cc_library(
    name = "quic_core_uber_received_packet_manager_lib",
    srcs = ["quiche/quic/core/uber_received_packet_manager.cc"],
    hdrs = ["quiche/quic/core/uber_received_packet_manager.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_received_packet_manager_lib",
        ":quic_core_utils_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_udp_socket_lib",
    srcs = select({
        "@envoy//bazel:windows_x86_64": [],
        "//conditions:default": ["quiche/quic/core/quic_udp_socket_posix.cc"],
    }),
    hdrs = select({
        "@envoy//bazel:windows_x86_64": [],
        "//conditions:default": ["quiche/quic/core/quic_udp_socket.h"],
    }),
    copts = quiche_copts + select({
        # On OSX/iOS, constants from RFC 3542 (e.g. IPV6_RECVPKTINFO) are not
        # usable without this define.
        "@envoy//bazel:apple": ["-D__APPLE_USE_RFC_3542"],
        "//conditions:default": [],
    }),
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_types_lib",
        ":quic_core_utils_lib",
        ":quic_platform",
        ":quic_platform_udp_socket",
    ],
)

envoy_cc_library(
    name = "quic_core_unacked_packet_map_lib",
    srcs = ["quiche/quic/core/quic_unacked_packet_map.cc"],
    hdrs = ["quiche/quic/core/quic_unacked_packet_map.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_connection_stats_lib",
        ":quic_core_packets_lib",
        ":quic_core_session_notifier_interface_lib",
        ":quic_core_transmission_info_lib",
        ":quic_core_utils_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_utils_lib",
    srcs = ["quiche/quic/core/quic_utils.cc"],
    hdrs = ["quiche/quic/core/quic_utils.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_constants_lib",
        ":quic_core_crypto_random_lib",
        ":quic_core_error_codes_lib",
        ":quic_core_frames_frames_lib",
        ":quic_core_types_lib",
        ":quic_core_versions_lib",
        ":quic_platform_base",
        ":quic_platform_socket_address",
    ],
)

envoy_cc_library(
    name = "quic_core_version_manager_lib",
    srcs = ["quiche/quic/core/quic_version_manager.cc"],
    hdrs = ["quiche/quic/core/quic_version_manager.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    deps = [
        ":quic_core_versions_lib",
        ":quic_platform_base",
    ],
)

envoy_cc_library(
    name = "quic_core_versions_lib",
    srcs = ["quiche/quic/core/quic_versions.cc"],
    hdrs = ["quiche/quic/core/quic_versions.h"],
    copts = quiche_copts,
    repository = "@envoy",
    tags = ["nofips"],
    visibility = ["//visibility:public"],
    deps = [
        ":quic_core_crypto_random_lib",
        ":quic_core_tag_lib",
        ":quic_core_types_lib",
        ":quic_platform_base",
        ":quiche_common_platform_endian",
    ],
)

envoy_cc_test_library(
    name = "quic_test_tools_config_peer_lib",
    srcs =
["quiche/quic/test_tools/quic_config_peer.cc"], hdrs = ["quiche/quic/test_tools/quic_config_peer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_config_lib", ":quic_core_packets_lib", ":quic_platform_base", ], ) envoy_cc_test_library( name = "quic_test_tools_crypto_server_config_peer_lib", srcs = [ "quiche/quic/test_tools/quic_crypto_server_config_peer.cc", ], hdrs = [ "quiche/quic/test_tools/quic_crypto_server_config_peer.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_crypto_crypto_handshake_lib", ":quic_test_tools_mock_clock_lib", ":quic_test_tools_mock_random_lib", ":quic_test_tools_test_utils_interface_lib", ":quiche_common_platform", ], ) envoy_cc_test_library( name = "quic_test_tools_first_flight_lib", srcs = [ "quiche/quic/test_tools/first_flight.cc", ], hdrs = [ "quiche/quic/test_tools/first_flight.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_config_lib", ":quic_core_connection_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_http_client_lib", ":quic_core_packet_writer_interface_lib", ":quic_core_packets_lib", ":quic_core_types_lib", ":quic_core_versions_lib", ":quic_platform", ":quic_test_tools_test_utils_interface_lib", ], ) envoy_cc_library( name = "quic_test_tools_flow_controller_peer_lib", srcs = [ "quiche/quic/test_tools/quic_flow_controller_peer.cc", ], hdrs = [ "quiche/quic/test_tools/quic_flow_controller_peer.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_packets_lib", ":quic_core_session_lib", ], ) envoy_cc_test_library( name = "quic_test_tools_framer_peer_lib", srcs = ["quiche/quic/test_tools/quic_framer_peer.cc"], hdrs = ["quiche/quic/test_tools/quic_framer_peer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_crypto_encryption_lib", ":quic_core_framer_lib", ":quic_core_packets_lib", ":quic_platform_base", ], ) 
envoy_cc_library( name = "quic_test_tools_interval_deque_peer_lib", hdrs = ["quiche/quic/test_tools/quic_interval_deque_peer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_interval_deque_lib", ":quic_core_interval_lib", ], ) envoy_cc_test_library( name = "quic_test_tools_mock_clock_lib", srcs = ["quiche/quic/test_tools/mock_clock.cc"], hdrs = ["quiche/quic/test_tools/mock_clock.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_clock_lib", ":quic_core_time_lib", ], ) envoy_cc_test_library( name = "quic_test_tools_mock_random_lib", srcs = ["quiche/quic/test_tools/mock_random.cc"], hdrs = ["quiche/quic/test_tools/mock_random.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [":quic_core_crypto_random_lib"], ) envoy_cc_test_library( name = "quic_test_tools_mock_syscall_wrapper_lib", srcs = ["quiche/quic/test_tools/quic_mock_syscall_wrapper.cc"], hdrs = ["quiche/quic/test_tools/quic_mock_syscall_wrapper.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_syscall_wrapper_lib", ":quic_platform_base", ":quic_platform_test", ], ) envoy_cc_test_library( name = "quic_test_tools_sent_packet_manager_peer_lib", srcs = ["quiche/quic/test_tools/quic_sent_packet_manager_peer.cc"], hdrs = ["quiche/quic/test_tools/quic_sent_packet_manager_peer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_congestion_control_congestion_control_interface_lib", ":quic_core_packets_lib", ":quic_core_sent_packet_manager_lib", ":quic_test_tools_unacked_packet_map_peer_lib", ], ) envoy_cc_test_library( name = "quic_test_tools_server_session_base_peer", hdrs = [ "quiche/quic/test_tools/quic_server_session_base_peer.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_http_spdy_session_lib", ":quic_core_utils_lib", ], ) envoy_cc_test_library( name = 
"quic_test_tools_simple_quic_framer_lib", srcs = ["quiche/quic/test_tools/simple_quic_framer.cc"], hdrs = ["quiche/quic/test_tools/simple_quic_framer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_crypto_encryption_lib", ":quic_core_framer_lib", ":quic_core_packets_lib", ":quic_platform_base", ], ) envoy_cc_test_library( name = "quic_test_tools_stream_send_buffer_peer_lib", srcs = ["quiche/quic/test_tools/quic_stream_send_buffer_peer.cc"], hdrs = ["quiche/quic/test_tools/quic_stream_send_buffer_peer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_stream_send_buffer_lib", ":quic_test_tools_interval_deque_peer_lib", ], ) envoy_cc_test_library( name = "quic_test_tools_stream_peer_lib", srcs = ["quiche/quic/test_tools/quic_stream_peer.cc"], hdrs = ["quiche/quic/test_tools/quic_stream_peer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_packets_lib", ":quic_core_session_lib", ":quic_core_stream_send_buffer_lib", ":quic_platform_base", ":quic_test_tools_flow_controller_peer_lib", ":quic_test_tools_stream_send_buffer_peer_lib", ], ) envoy_cc_test_library( name = "quic_test_tools_test_certificates_lib", srcs = ["quiche/quic/test_tools/test_certificates.cc"], hdrs = ["quiche/quic/test_tools/test_certificates.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_platform_base", ":quiche_common_platform", ], ) envoy_cc_test_library( name = "quic_test_tools_test_utils_interface_lib", srcs = [ "quiche/quic/test_tools/crypto_test_utils.cc", "quiche/quic/test_tools/mock_quic_session_visitor.cc", "quiche/quic/test_tools/mock_quic_time_wait_list_manager.cc", "quiche/quic/test_tools/quic_buffered_packet_store_peer.cc", "quiche/quic/test_tools/quic_connection_peer.cc", "quiche/quic/test_tools/quic_dispatcher_peer.cc", "quiche/quic/test_tools/quic_test_utils.cc", ], hdrs = [ "quiche/quic/test_tools/crypto_test_utils.h", 
"quiche/quic/test_tools/mock_quic_session_visitor.h", "quiche/quic/test_tools/mock_quic_time_wait_list_manager.h", "quiche/quic/test_tools/quic_buffered_packet_store_peer.h", "quiche/quic/test_tools/quic_connection_peer.h", "quiche/quic/test_tools/quic_dispatcher_peer.h", "quiche/quic/test_tools/quic_test_utils.h", ], copts = quiche_copts, external_deps = ["ssl"], repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_buffer_allocator_lib", ":quic_core_congestion_control_congestion_control_interface_lib", ":quic_core_connection_lib", ":quic_core_connection_stats_lib", ":quic_core_crypto_crypto_handshake_lib", ":quic_core_crypto_encryption_lib", ":quic_core_crypto_proof_source_interface_lib", ":quic_core_crypto_random_lib", ":quic_core_data_lib", ":quic_core_framer_lib", ":quic_core_http_client_lib", ":quic_core_http_spdy_session_lib", ":quic_core_packet_creator_lib", ":quic_core_packet_writer_interface_lib", ":quic_core_packets_lib", ":quic_core_received_packet_manager_lib", ":quic_core_sent_packet_manager_lib", ":quic_core_server_id_lib", ":quic_core_server_lib", ":quic_core_session_lib", ":quic_core_time_wait_list_manager_lib", ":quic_core_utils_lib", ":quic_platform", ":quic_platform_test", ":quic_test_tools_config_peer_lib", ":quic_test_tools_framer_peer_lib", ":quic_test_tools_mock_clock_lib", ":quic_test_tools_mock_random_lib", ":quic_test_tools_sent_packet_manager_peer_lib", ":quic_test_tools_simple_quic_framer_lib", ":quic_test_tools_stream_peer_lib", ":quiche_common_test_tools_test_utils_lib", ":spdy_core_framer_lib", ], ) envoy_cc_test_library( name = "quic_test_tools_session_peer_lib", srcs = [ "quiche/quic/test_tools/quic_session_peer.cc", ], hdrs = [ "quiche/quic/test_tools/quic_session_peer.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_packets_lib", ":quic_core_session_lib", ":quic_core_utils_lib", ":quic_platform", ], ) envoy_cc_test_library( name = "quic_test_tools_unacked_packet_map_peer_lib", 
srcs = ["quiche/quic/test_tools/quic_unacked_packet_map_peer.cc"], hdrs = ["quiche/quic/test_tools/quic_unacked_packet_map_peer.h"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [":quic_core_unacked_packet_map_lib"], ) envoy_cc_test_library( name = "epoll_server_platform", hdrs = [ "quiche/epoll_server/platform/api/epoll_address_test_utils.h", "quiche/epoll_server/platform/api/epoll_bug.h", "quiche/epoll_server/platform/api/epoll_expect_bug.h", "quiche/epoll_server/platform/api/epoll_export.h", "quiche/epoll_server/platform/api/epoll_logging.h", "quiche/epoll_server/platform/api/epoll_ptr_util.h", "quiche/epoll_server/platform/api/epoll_test.h", "quiche/epoll_server/platform/api/epoll_thread.h", "quiche/epoll_server/platform/api/epoll_time.h", ], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:epoll_server_platform_impl_lib"], ) envoy_cc_test_library( name = "epoll_server_lib", srcs = select({ "@envoy//bazel:linux": [ "quiche/epoll_server/fake_simple_epoll_server.cc", "quiche/epoll_server/simple_epoll_server.cc", ], "//conditions:default": [], }), hdrs = select({ "@envoy//bazel:linux": [ "quiche/epoll_server/fake_simple_epoll_server.h", "quiche/epoll_server/simple_epoll_server.h", ], "//conditions:default": [], }), copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [":epoll_server_platform"], ) envoy_cc_library( name = "quiche_common_platform_optional", hdrs = ["quiche/common/platform/api/quiche_optional.h"], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quiche_common_platform_export", "@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_optional_impl_lib", ], ) envoy_cc_library( name = "quiche_common_platform", hdrs = [ "quiche/common/platform/api/quiche_arraysize.h", "quiche/common/platform/api/quiche_logging.h", "quiche/common/platform/api/quiche_optional.h", 
"quiche/common/platform/api/quiche_ptr_util.h", "quiche/common/platform/api/quiche_str_cat.h", "quiche/common/platform/api/quiche_string_piece.h", "quiche/common/platform/api/quiche_text_utils.h", "quiche/common/platform/api/quiche_time_utils.h", "quiche/common/platform/api/quiche_unordered_containers.h", ], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quiche_common_platform_export", ":quiche_common_platform_optional", "@envoy//source/extensions/quic_listeners/quiche/platform:quiche_common_platform_impl_lib", ], ) envoy_cc_test_library( name = "quiche_common_platform_test", srcs = [ "quiche/common/platform/api/quiche_endian_test.cc", "quiche/common/platform/api/quiche_str_cat_test.cc", "quiche/common/platform/api/quiche_text_utils_test.cc", "quiche/common/platform/api/quiche_time_utils_test.cc", ], hdrs = ["quiche/common/platform/api/quiche_test.h"], repository = "@envoy", tags = ["nofips"], deps = [ ":quiche_common_platform", ":quiche_common_platform_endian", "@envoy//test/extensions/quic_listeners/quiche/platform:quiche_common_platform_test_impl_lib", ], ) envoy_cc_library( name = "quiche_common_lib", srcs = [ "quiche/common/quiche_data_reader.cc", "quiche/common/quiche_data_writer.cc", ], hdrs = [ "quiche/common/quiche_data_reader.h", "quiche/common/quiche_data_writer.h", "quiche/common/simple_linked_hash_map.h", ], repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = [ ":quiche_common_platform", ":quiche_common_platform_endian", ], ) envoy_cc_test( name = "epoll_server_test", srcs = select({ "@envoy//bazel:linux": ["quiche/epoll_server/simple_epoll_server_test.cc"], "//conditions:default": [], }), copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [":epoll_server_lib"], ) envoy_cc_test( name = "quiche_common_test", srcs = ["quiche/common/simple_linked_hash_map_test.cc"], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ 
":quiche_common_lib", ":quiche_common_platform_test", ], ) envoy_cc_test( name = "http2_platform_api_test", srcs = [ "quiche/http2/platform/api/http2_string_utils_test.cc", "quiche/http2/test_tools/http2_random_test.cc", ], repository = "@envoy", tags = ["nofips"], deps = [ ":http2_platform", ":http2_test_tools_random", ], ) envoy_cc_test( name = "spdy_platform_api_test", srcs = ["quiche/spdy/platform/api/spdy_string_utils_test.cc"], repository = "@envoy", tags = ["nofips"], deps = [ ":quiche_common_test_tools_test_utils_lib", ":spdy_platform", ], ) envoy_cc_library( name = "quic_platform_mem_slice_span", hdrs = [ "quiche/quic/platform/api/quic_mem_slice_span.h", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], visibility = ["//visibility:public"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_mem_slice_span_impl_lib"], ) envoy_cc_test_library( name = "quic_platform_test_mem_slice_vector_lib", hdrs = ["quiche/quic/platform/api/quic_test_mem_slice_vector.h"], repository = "@envoy", tags = ["nofips"], deps = ["@envoy//test/extensions/quic_listeners/quiche/platform:quic_platform_test_mem_slice_vector_impl_lib"], ) envoy_cc_library( name = "quic_platform_mem_slice_storage", hdrs = ["quiche/quic/platform/api/quic_mem_slice_storage.h"], repository = "@envoy", visibility = ["//visibility:public"], deps = ["@envoy//source/extensions/quic_listeners/quiche/platform:quic_platform_mem_slice_storage_impl_lib"], ) envoy_cc_test( name = "spdy_core_header_block_test", srcs = ["quiche/spdy/core/spdy_header_block_test.cc"], copts = quiche_copts, coverage = False, repository = "@envoy", tags = ["nofips"], deps = [ ":spdy_core_header_block_lib", ":spdy_core_test_utils_lib", ], ) envoy_cc_test( name = "quic_platform_api_test", srcs = [ "quiche/quic/platform/api/quic_containers_test.cc", "quiche/quic/platform/api/quic_mem_slice_span_test.cc", # Re-enable it when tests pass. 
# "quiche/quic/platform/api/quic_mem_slice_storage_test.cc", "quiche/quic/platform/api/quic_mem_slice_test.cc", "quiche/quic/platform/api/quic_reference_counted_test.cc", "quiche/quic/platform/api/quic_string_utils_test.cc", ], copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_buffer_allocator_lib", ":quic_platform", ":quic_platform_mem_slice_span", ":quic_platform_mem_slice_storage", ":quic_platform_test", ":quic_platform_test_mem_slice_vector_lib", ], ) envoy_cc_test( name = "quic_core_batch_writer_batch_writer_test", srcs = select({ "@envoy//bazel:linux": ["quiche/quic/core/batch_writer/quic_batch_writer_test.cc"], "//conditions:default": [], }), copts = quiche_copts, repository = "@envoy", tags = ["nofips"], deps = [ ":quic_core_batch_writer_batch_writer_test_lib", ":quic_core_batch_writer_gso_batch_writer_lib", ":quic_core_batch_writer_sendmmsg_batch_writer_lib", ":quic_platform", ], ) ================================================ FILE: bazel/external/quiche.genrule_cmd ================================================ #!/bin/bash set -e # This script is invoked from quiche.BUILD to tweak QUICHE source files into a # form usable by Envoy. Transformations performed here: # # - Move subtree under quiche/ base dir, for clarity in #include statements. # - Rewrite include directives for platform/impl files to point to the directory # containing Envoy's QUICHE platform implementation. # - Fix include directives for non-platform/impl files to remove # "net/third_party" from the path. (This is an artifact of Chromium source # tree structure.) # Determine base directory of unmodified QUICHE source files. In practice, this # ends up being "external/com_googlesource_quiche". src_base_dir=$$(dirname $$(dirname $$(dirname $(rootpath quic/core/quic_constants.h)))) # sed commands to apply to each source file. cat <sed_commands # Rewrite include directives for testonly platform impl files. 
/^#include/ s!net/http2/platform/impl/http2_reconstruct_object_impl.h!test/extensions/quic_listeners/quiche/platform/http2_reconstruct_object_impl.h! /^#include/ s!net/quic/platform/impl/quic_expect_bug_impl.h!test/extensions/quic_listeners/quiche/platform/quic_expect_bug_impl.h! /^#include/ s!net/quic/platform/impl/quic_mock_log_impl.h!test/extensions/quic_listeners/quiche/platform/quic_mock_log_impl.h! /^#include/ s!net/quic/platform/impl/quic_port_utils_impl.h!test/extensions/quic_listeners/quiche/platform/quic_port_utils_impl.h! /^#include/ s!net/quic/platform/impl/quic_sleep_impl.h!test/extensions/quic_listeners/quiche/platform/quic_sleep_impl.h! /^#include/ s!net/quic/platform/impl/quic_system_event_loop_impl.h!test/extensions/quic_listeners/quiche/platform/quic_system_event_loop_impl.h! /^#include/ s!net/quic/platform/impl/quic_test_impl.h!test/extensions/quic_listeners/quiche/platform/quic_test_impl.h! /^#include/ s!net/quic/platform/impl/quic_test_mem_slice_vector_impl.h!test/extensions/quic_listeners/quiche/platform/quic_test_mem_slice_vector_impl.h! /^#include/ s!net/quic/platform/impl/quic_test_output_impl.h!test/extensions/quic_listeners/quiche/platform/quic_test_output_impl.h! /^#include/ s!net/quic/platform/impl/quic_thread_impl.h!test/extensions/quic_listeners/quiche/platform/quic_thread_impl.h! /^#include/ s!net/quiche/common/platform/impl/quiche_test_impl.h!test/extensions/quic_listeners/quiche/platform/quiche_test_impl.h! /^#include/ s!net/spdy/platform/impl/spdy_test_helpers_impl.h!test/extensions/quic_listeners/quiche/platform/spdy_test_helpers_impl.h! /^#include/ s!net/spdy/platform/impl/spdy_test_impl.h!test/extensions/quic_listeners/quiche/platform/spdy_test_impl.h! # Rewrite include directives for platform impl files. /^#include/ s!net/(http2|spdy|quic|quiche/common)/platform/impl/!extensions/quic_listeners/quiche/platform/! # Rewrite include directives for epoll_server platform impl files. 
/^#include/ s!net/tools/epoll_server/platform/impl/!test/extensions/quic_listeners/quiche/platform/! # Strip "net/third_party" from include directives to other QUICHE files. /^#include/ s!net/third_party/quiche/src/!quiche/! # Rewrite gmock & gtest includes. /^#include/ s!testing/gmock/include/gmock/!gmock/! /^#include/ s!testing/gtest/include/gtest/!gtest/! # Rewrite third_party includes. /^#include/ s!third_party/boringssl/src/include/!! /^#include/ s!third_party/zlib/zlib!zlib! /^import/ s!cached_network_parameters!quiche/quic/core/proto/cached_network_parameters! # Rewrite #pragma clang directives. /^#pragma/ s!clang!GCC! /^#pragma/ s!-Weverything!-Wall! EOF for src_file in $(SRCS); do # Extract relative path (e.g. "quic/core/quic_utils.cc") from full path in # src_path (e.g. "external/com_googlesource_quiche/quic/core/quic_utils.cc"). src_path="$${src_file#$$src_base_dir/}" # Map to output file with quiche/ base directory inserted in path. out_file="$(@D)/quiche/$$src_path" mkdir -p "$$(dirname "$$out_file")" # Apply text substitutions. -E ensures consistent behavior on Linux vs. OS X. 
sed -E -f sed_commands "$$src_file" > "$$out_file" done ================================================ FILE: bazel/external/rapidjson.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "rapidjson", hdrs = glob(["include/rapidjson/**/*.h"]), defines = ["RAPIDJSON_HAS_STDSTRING=1"], includes = ["include"], visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/spdlog.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "spdlog", hdrs = glob([ "include/**/*.h", ]), defines = ["SPDLOG_FMT_EXTERNAL"], includes = ["include"], visibility = ["//visibility:public"], deps = ["@com_github_fmtlib_fmt//:fmtlib"], ) ================================================ FILE: bazel/external/sqlparser.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "sqlparser", srcs = glob(["src/**/*.cpp"]), hdrs = glob([ "include/**/*.h", "src/**/*.h", ]), defines = select({ "@envoy//bazel:windows_x86_64": ["YY_NO_UNISTD_H"], "//conditions:default": [], }), visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/tclap.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "tclap", hdrs = glob(["include/tclap/*.h"]), includes = ["include"], visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/twitter_common_finagle_thrift.BUILD ================================================ load("@rules_python//python:defs.bzl", "py_library") licenses(["notice"]) # Apache 2 py_library( name = "twitter_common_finagle_thrift", srcs = glob([ "gen/**/*.py", ]), 
visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/twitter_common_lang.BUILD ================================================ load("@rules_python//python:defs.bzl", "py_library") licenses(["notice"]) # Apache 2 py_library( name = "twitter_common_lang", srcs = glob([ "twitter/**/*.py", ]), visibility = ["//visibility:public"], ) ================================================ FILE: bazel/external/twitter_common_rpc.BUILD ================================================ load("@rules_python//python:defs.bzl", "py_library") licenses(["notice"]) # Apache 2 py_library( name = "twitter_common_rpc", srcs = glob([ "twitter/**/*.py", ]), visibility = ["//visibility:public"], deps = [ "@com_github_twitter_common_finagle_thrift//:twitter_common_finagle_thrift", "@com_github_twitter_common_lang//:twitter_common_lang", ], ) ================================================ FILE: bazel/external/wee8.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") load("@envoy_large_machine_exec_property//:constants.bzl", "LARGE_MACHINE") load(":genrule_cmd.bzl", "genrule_cmd") licenses(["notice"]) # Apache 2 cc_library( name = "wee8", srcs = [ "libwee8.a", ], hdrs = [ "wee8/include/v8-version.h", "wee8/third_party/wasm-api/wasm.hh", ], defines = ["ENVOY_WASM_V8"], includes = [ "wee8/include", "wee8/third_party", ], visibility = ["//visibility:public"], ) genrule( name = "build", srcs = glob(["wee8/**"]), outs = [ "libwee8.a", ], cmd = genrule_cmd("@envoy//bazel/external:wee8.genrule_cmd"), exec_properties = LARGE_MACHINE, ) ================================================ FILE: bazel/external/wee8.genrule_cmd ================================================ #!/bin/bash set -e # This works only on Linux-{x86_64,s390x,aarch64} and macOS-x86_64. 
case "$$(uname -s)-$$(uname -m)" in Linux-x86_64|Linux-s390x|Linux-aarch64|Darwin-x86_64) ;; *) echo "ERROR: wee8 is currently supported only on Linux-{x86_64,s390x,aarch64} and macOS-x86_64." >&2 exit 1 esac # Bazel magic. ROOT=$$(dirname $(rootpath wee8/BUILD.gn))/.. pushd $$ROOT/wee8 # Clean after previous build. rm -rf out/wee8 # Export compiler configuration. export CXXFLAGS="$${CXXFLAGS-} -Wno-sign-compare -Wno-deprecated-copy -Wno-unknown-warning-option" if [[ ( `uname` == "Darwin" && $${CXX-} == "" ) || $${CXX-} == *"clang"* ]]; then export IS_CLANG=true export CC=$${CC:-clang} export CXX=$${CXX:-clang++} export CXXFLAGS="$${CXXFLAGS} -Wno-implicit-int-float-conversion -Wno-builtin-assume-aligned-alignment -Wno-final-dtor-non-final-class" else export IS_CLANG=false export CC=$${CC:-gcc} export CXX=$${CXX:-g++} fi export AR=$${AR:-ar} export NM=$${NM:-nm} # Hook sanitizers. if [[ $${ENVOY_ASAN-} == "1" ]]; then WEE8_BUILD_ARGS+=" is_asan=true" WEE8_BUILD_ARGS+=" is_lsan=true" fi if [[ $${ENVOY_UBSAN_VPTR-} == "1" ]]; then WEE8_BUILD_ARGS+=" is_ubsan=true" WEE8_BUILD_ARGS+=" is_ubsan_vptr=true" fi if [[ $${ENVOY_MSAN-} == "1" ]]; then WEE8_BUILD_ARGS+=" is_msan=true" export LDFLAGS="$${LDFLAGS} -L/opt/libcxx_msan/lib -Wl,-rpath,/opt/libcxx_msan/lib" fi if [[ $${ENVOY_TSAN-} == "1" ]]; then WEE8_BUILD_ARGS+=" is_tsan=true" fi # Debug/release build. if [[ $(COMPILATION_MODE) == "dbg" && $${ENVOY_UBSAN_VPTR-} != "1" && $${ENVOY_MSAN-} != "1" && $${ENVOY_TSAN-} != "1" ]]; then WEE8_BUILD_ARGS+=" is_debug=true" WEE8_BUILD_ARGS+=" v8_symbol_level=2" WEE8_BUILD_ARGS+=" v8_optimized_debug=false" else WEE8_BUILD_ARGS+=" is_debug=false" WEE8_BUILD_ARGS+=" v8_symbol_level=1" WEE8_BUILD_ARGS+=" v8_enable_handle_zapping=false" fi # Clang or not Clang, that is the question. WEE8_BUILD_ARGS+=" is_clang=$$IS_CLANG" # Hack to disable bleeding-edge compiler flags. WEE8_BUILD_ARGS+=" use_xcode_clang=true" # Use local toolchain. 
WEE8_BUILD_ARGS+=" custom_toolchain=\"//build/toolchain/linux/unbundle:default\"" # Use local stdlibc++ / libc++. WEE8_BUILD_ARGS+=" use_custom_libcxx=false" # Use local sysroot. WEE8_BUILD_ARGS+=" use_sysroot=false" # Disable unused GLib2 dependency. WEE8_BUILD_ARGS+=" use_glib=false" # Expose debug symbols. WEE8_BUILD_ARGS+=" v8_expose_symbols=true" # Build monolithic library. WEE8_BUILD_ARGS+=" is_component_build=false" WEE8_BUILD_ARGS+=" v8_enable_i18n_support=false" WEE8_BUILD_ARGS+=" v8_enable_gdbjit=false" WEE8_BUILD_ARGS+=" v8_use_external_startup_data=false" # Disable read-only heap, since it's leaky and HEAPCHECK complains about it. # TODO(PiotrSikora): remove when fixed upstream. WEE8_BUILD_ARGS+=" v8_enable_shared_ro_heap=false" # Support Arm64 if [[ `uname -m` == "aarch64" ]]; then WEE8_BUILD_ARGS+=" target_cpu=\"arm64\"" fi # Build wee8. if [[ -f /etc/centos-release ]] && [[ $$(cat /etc/centos-release) =~ "CentOS Linux release 7" ]] && [[ -x "$$(command -v gn)" ]]; then # Using system default gn tools # This is done only for CentOS 7, as it has an old version of GLIBC which is otherwise incompatible gn=$$(command -v gn) elif [[ "$$(uname -s)" == "Darwin" ]]; then gn=buildtools/mac/gn elif [[ "$$(uname -s)-$$(uname -m)" == "Linux-x86_64" ]]; then gn=buildtools/linux64/gn else # Using system default gn tools gn=$$(command -v gn) fi if [[ "$$(uname -s)" == "Darwin" ]]; then ninja=third_party/depot_tools/ninja elif [[ "$$(uname -s)-$$(uname -m)" == "Linux-x86_64" ]]; then ninja=third_party/depot_tools/ninja else # Using system default ninja tools ninja=$$(command -v ninja) fi "$$gn" gen out/wee8 --args="$$WEE8_BUILD_ARGS" "$$ninja" -C out/wee8 wee8 # Move compiled library to the expected destinations. popd mv $$ROOT/wee8/out/wee8/obj/libwee8.a $(execpath libwee8.a) ================================================ FILE: bazel/external/wee8.patch ================================================ # 1. Fix linking with unbundled toolchain on macOS. # 2. 
Increase VSZ limit to 4TiB (allows us to start up to 409 VMs). # 3. Fix MSAN linking. --- wee8/build/toolchain/gcc_toolchain.gni +++ wee8/build/toolchain/gcc_toolchain.gni @@ -329,6 +329,8 @@ template("gcc_toolchain") { # AIX does not support either -D (deterministic output) or response # files. command = "$ar -X64 {{arflags}} -r -c -s {{output}} {{inputs}}" + } else if (current_os == "mac") { + command = "\"$ar\" {{arflags}} -r -c -s {{output}} {{inputs}}" } else { rspfile = "{{output}}.rsp" rspfile_content = "{{inputs}}" @@ -507,7 +509,7 @@ template("gcc_toolchain") { start_group_flag = "" end_group_flag = "" - if (current_os != "aix") { + if (current_os != "aix" && current_os != "mac") { # the "--start-group .. --end-group" feature isn't available on the aix ld. start_group_flag = "-Wl,--start-group" end_group_flag = "-Wl,--end-group " --- wee8/src/objects/backing-store.cc +++ wee8/src/objects/backing-store.cc @@ -34,7 +34,7 @@ constexpr bool kUseGuardRegions = false; // address space limits needs to be smaller. constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB #elif V8_TARGET_ARCH_64_BIT -constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB +constexpr size_t kAddressSpaceLimit = 0x40100000000L; // 4 TiB + 4 GiB #else constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB #endif --- wee8/build/config/sanitizers/sanitizers.gni +++ wee8/build/config/sanitizers/sanitizers.gni @@ -150,7 +150,7 @@ if (!is_a_target_toolchain) { # standard system libraries. We have instrumented system libraries for msan, # which requires them to prevent false positives. # TODO(thakis): Maybe remove this variable. -use_prebuilt_instrumented_libraries = is_msan +use_prebuilt_instrumented_libraries = false # Whether we are doing a fuzzer build. 
Normally this should be checked instead # of checking "use_libfuzzer || use_afl" because often developers forget to @@ -198,8 +198,7 @@ assert(!using_sanitizer || is_clang, assert(!is_cfi || is_clang, "is_cfi requires setting is_clang = true in 'gn args'") -prebuilt_instrumented_libraries_available = - is_msan && (msan_track_origins == 0 || msan_track_origins == 2) +prebuilt_instrumented_libraries_available = false if (use_libfuzzer && is_linux) { if (is_asan) { ================================================ FILE: bazel/external/xxhash.BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") licenses(["notice"]) # Apache 2 cc_library( name = "xxhash", srcs = ["xxhash.c"], hdrs = [ "xxh3.h", "xxhash.h", ], visibility = ["//visibility:public"], ) ================================================ FILE: bazel/foreign_cc/BUILD ================================================ load("@rules_cc//cc:defs.bzl", "cc_library") load("//bazel:envoy_build_system.bzl", "envoy_cmake_external", "envoy_package") load("@rules_foreign_cc//tools/build_defs:configure.bzl", "configure_make") licenses(["notice"]) # Apache 2 envoy_package() # autotools packages are unusable on Windows as-is # TODO: Consider our own gperftools.BUILD file as we do with many other packages configure_make( name = "gperftools_build", configure_options = [ "--enable-shared=no", "--enable-frame-pointers", "--disable-libunwind", ] + select({ "//bazel:apple": ["AR=/usr/bin/ar"], "//conditions:default": [], }), lib_source = "@com_github_gperftools_gperftools//:all", linkopts = ["-lpthread"], make_commands = ["make install-libLTLIBRARIES install-perftoolsincludeHEADERS"], static_libraries = select({ "//bazel:debug_tcmalloc": ["libtcmalloc_debug.a"], "//conditions:default": ["libtcmalloc_and_profiler.a"], }), tags = ["skip_on_windows"], ) # Workaround for https://github.com/bazelbuild/rules_foreign_cc/issues/227 cc_library( name = "gperftools", tags = ["skip_on_windows"], deps 
= [ "gperftools_build", ], ) configure_make( name = "luajit", configure_command = "build.py", configure_env_vars = select({ # This shouldn't be needed! See # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, "//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"}, "//bazel:windows_dbg_build": {"WINDOWS_DBG_BUILD": "debug"}, "//conditions:default": {}, }), lib_source = "@com_github_luajit_luajit//:all", make_commands = [], out_include_dir = "include/luajit-2.1", static_libraries = select({ "//bazel:windows_x86_64": ["lua51.lib"], "//conditions:default": ["libluajit-5.1.a"], }), ) configure_make( name = "moonjit", configure_command = "build.py", configure_env_vars = select({ # This shouldn't be needed! See # https://github.com/envoyproxy/envoy/issues/6084 # TODO(htuch): Remove when #6084 is fixed "//bazel:asan_build": {"ENVOY_CONFIG_ASAN": "1"}, "//bazel:msan_build": {"ENVOY_CONFIG_MSAN": "1"}, "//bazel:windows_dbg_build": {"WINDOWS_DBG_BUILD": "debug"}, "//conditions:default": {}, }), lib_source = "@com_github_moonjit_moonjit//:all", make_commands = [], out_include_dir = "include/moonjit-2.2", static_libraries = select({ "//bazel:windows_x86_64": ["lua51.lib"], "//conditions:default": ["libluajit-5.1.a"], }), ) envoy_cmake_external( name = "ares", cache_entries = { "CARES_SHARED": "no", "CARES_STATIC": "on", "CMAKE_CXX_COMPILER_FORCED": "on", "CMAKE_INSTALL_LIBDIR": "lib", }, defines = ["CARES_STATICLIB"], lib_source = "@com_github_c_ares_c_ares//:all", linkopts = select({ "//bazel:apple": ["-lresolv"], "//conditions:default": [], }), postfix_script = select({ "//bazel:windows_x86_64": "cp -L $EXT_BUILD_ROOT/external/com_github_c_ares_c_ares/nameser.h $INSTALLDIR/include/nameser.h", "//conditions:default": "", }), static_libraries = select({ "//bazel:windows_x86_64": ["cares.lib"], "//conditions:default": ["libcares.a"], }), ) envoy_cmake_external( name = "curl", cache_entries = { 
"BUILD_CURL_EXE": "off", "BUILD_TESTING": "off", "BUILD_SHARED_LIBS": "off", "CURL_HIDDEN_SYMBOLS": "off", "CMAKE_USE_LIBSSH2": "off", "CURL_BROTLI": "off", "CMAKE_USE_GSSAPI": "off", "HTTP_ONLY": "on", "CMAKE_INSTALL_LIBDIR": "lib", # Explicitly enable Unix sockets, once afunix.h is correctly detected # "USE_UNIX_SOCKETS": "on", # Explicitly disable "Windows" crypto for Windows "CURL_DISABLE_CRYPTO_AUTH": "on", # C-Ares. "ENABLE_ARES": "on", "CARES_LIBRARY": "$EXT_BUILD_DEPS/ares", "CARES_INCLUDE_DIR": "$EXT_BUILD_DEPS/ares/include", # SSL (via Envoy's SSL dependency) is disabled, curl's CMake uses # FindOpenSSL.cmake which fails at what looks like version parsing # (the libraries are found ok). "CURL_CA_PATH": "none", "CMAKE_USE_OPENSSL": "off", "OPENSSL_ROOT_DIR": "$EXT_BUILD_DEPS", # NGHTTP2. "USE_NGHTTP2": "on", "NGHTTP2_LIBRARY": "$EXT_BUILD_DEPS/nghttp2", "NGHTTP2_INCLUDE_DIR": "$EXT_BUILD_DEPS/nghttp2/include", # ZLIB. "CURL_ZLIB": "on", "ZLIB_LIBRARY": "$EXT_BUILD_DEPS/zlib", "ZLIB_INCLUDE_DIR": "$EXT_BUILD_DEPS/zlib/include", "CMAKE_CXX_COMPILER_FORCED": "on", "CMAKE_C_FLAGS_BAZEL": "-fPIC", # Note we use Bazel's flags (not _RELEASE/_DEBUG CMake flags), but this toggle # also works around a bug in CMP0091 logic which re-injected a badly placed -M flag. # See https://github.com/bazelbuild/rules_foreign_cc/issues/426 "CURL_STATIC_CRT": "on", }, defines = ["CURL_STATICLIB"], generate_crosstool_file = True, lib_source = "@com_github_curl//:all", static_libraries = select({ "//bazel:windows_x86_64": ["libcurl.lib"], "//conditions:default": ["libcurl.a"], }), deps = [ ":ares", ":nghttp2", "//external:ssl", "//external:zlib", ], ) envoy_cmake_external( name = "event", cache_entries = { "EVENT__DISABLE_OPENSSL": "on", "EVENT__DISABLE_MBEDTLS": "on", "EVENT__DISABLE_REGRESS": "on", "EVENT__DISABLE_TESTS": "on", "EVENT__LIBRARY_TYPE": "STATIC", # Force _GNU_SOURCE on for Android builds. 
This would be contained in # a 'select' but the downstream macro uses a select on all of these # options, and they cannot be nested. # If https://github.com/bazelbuild/rules_foreign_cc/issues/289 is fixed # this can be removed. # More details https://github.com/lyft/envoy-mobile/issues/116 "_GNU_SOURCE": "on", }, lib_source = "@com_github_libevent_libevent//:all", static_libraries = select({ # macOS organization of libevent is different from Windows/Linux. # Including libevent_core is a requirement on those platforms, but # results in duplicate symbols when built on macOS. # See https://github.com/lyft/envoy-mobile/issues/677 for details. "//bazel:apple": [ "libevent.a", "libevent_pthreads.a", ], "//bazel:windows_x86_64": [ "event.lib", "event_core.lib", ], "//conditions:default": [ "libevent.a", "libevent_pthreads.a", "libevent_core.a", ], }), ) envoy_cmake_external( name = "llvm", cache_entries = { # Disable both: BUILD and INCLUDE, since some of the INCLUDE # targets build code instead of only generating build files. "LLVM_BUILD_DOCS": "off", "LLVM_INCLUDE_DOCS": "off", "LLVM_BUILD_EXAMPLES": "off", "LLVM_INCLUDE_EXAMPLES": "off", "LLVM_BUILD_RUNTIME": "off", "LLVM_BUILD_RUNTIMES": "off", "LLVM_INCLUDE_RUNTIMES": "off", "LLVM_BUILD_TESTS": "off", "LLVM_INCLUDE_TESTS": "off", "LLVM_BUILD_TOOLS": "off", "LLVM_INCLUDE_TOOLS": "off", "LLVM_BUILD_UTILS": "off", "LLVM_INCLUDE_UTILS": "off", "LLVM_ENABLE_LIBEDIT": "off", "LLVM_ENABLE_LIBXML2": "off", "LLVM_ENABLE_TERMINFO": "off", "LLVM_ENABLE_ZLIB": "off", "LLVM_TARGETS_TO_BUILD": "X86", "CMAKE_CXX_COMPILER_FORCED": "on", # Workaround for the issue with statically linked libstdc++ # using -l:libstdc++.a. "CMAKE_CXX_FLAGS": "-lstdc++", }, env_vars = { # Workaround for the -DDEBUG flag added in fastbuild on macOS, # which conflicts with DEBUG macro used in LLVM. 
"CFLAGS": "-UDEBUG", "CXXFLAGS": "-UDEBUG", "ASMFLAGS": "-UDEBUG", }, lib_source = "@org_llvm_llvm//:all", static_libraries = select({ "//conditions:default": [ # Order from llvm-config --libnames. "libLLVMLTO.a", "libLLVMPasses.a", "libLLVMObjCARCOpts.a", "libLLVMSymbolize.a", "libLLVMDebugInfoPDB.a", "libLLVMDebugInfoDWARF.a", "libLLVMFuzzMutate.a", "libLLVMTableGen.a", "libLLVMDlltoolDriver.a", "libLLVMLineEditor.a", "libLLVMOrcJIT.a", "libLLVMCoverage.a", "libLLVMMIRParser.a", "libLLVMObjectYAML.a", "libLLVMLibDriver.a", "libLLVMOption.a", "libLLVMWindowsManifest.a", "libLLVMX86Disassembler.a", "libLLVMX86AsmParser.a", "libLLVMX86CodeGen.a", "libLLVMGlobalISel.a", "libLLVMSelectionDAG.a", "libLLVMAsmPrinter.a", "libLLVMDebugInfoCodeView.a", "libLLVMDebugInfoMSF.a", "libLLVMX86Desc.a", "libLLVMMCDisassembler.a", "libLLVMX86Info.a", "libLLVMX86Utils.a", "libLLVMMCJIT.a", "libLLVMInterpreter.a", "libLLVMExecutionEngine.a", "libLLVMRuntimeDyld.a", "libLLVMCodeGen.a", "libLLVMTarget.a", "libLLVMCoroutines.a", "libLLVMipo.a", "libLLVMInstrumentation.a", "libLLVMVectorize.a", "libLLVMScalarOpts.a", "libLLVMLinker.a", "libLLVMIRReader.a", "libLLVMAsmParser.a", "libLLVMInstCombine.a", "libLLVMTransformUtils.a", "libLLVMBitWriter.a", "libLLVMAnalysis.a", "libLLVMProfileData.a", "libLLVMObject.a", "libLLVMMCParser.a", "libLLVMMC.a", "libLLVMBitReader.a", "libLLVMBitstreamReader.a", "libLLVMCore.a", "libLLVMBinaryFormat.a", "libLLVMSupport.a", "libLLVMDemangle.a", "libLLVMRemarks.a", "libLLVMCFGuard.a", "libLLVMTextAPI.a", ], }), ) envoy_cmake_external( name = "nghttp2", cache_entries = { "ENABLE_LIB_ONLY": "on", "ENABLE_SHARED_LIB": "off", "ENABLE_STATIC_LIB": "on", "CMAKE_INSTALL_LIBDIR": "lib", "CMAKE_CXX_COMPILER_FORCED": "on", }, cmake_files_dir = "$BUILD_TMPDIR/lib/CMakeFiles", debug_cache_entries = {"ENABLE_DEBUG": "on"}, defines = ["NGHTTP2_STATICLIB"], lib_source = "@com_github_nghttp2_nghttp2//:all", static_libraries = select({ "//bazel:windows_x86_64": 
["nghttp2.lib"], "//conditions:default": ["libnghttp2.a"], }), ) envoy_cmake_external( name = "wavm", binaries = ["wavm"], cache_entries = { "LLVM_DIR": "$EXT_BUILD_DEPS/copy_llvm/llvm/lib/cmake/llvm", "WAVM_ENABLE_STATIC_LINKING": "on", "WAVM_ENABLE_RELEASE_ASSERTS": "on", "WAVM_ENABLE_UNWIND": "no", # Workaround for the issue with statically linked libstdc++ # using -l:libstdc++.a. "CMAKE_CXX_FLAGS": "-lstdc++ -Wno-unused-command-line-argument", }, defines = ["ENVOY_WASM_WAVM"], env_vars = { # Workaround for the -DDEBUG flag added in fastbuild on macOS, # which conflicts with DEBUG macro used in LLVM. "CFLAGS": "-UDEBUG", "CXXFLAGS": "-UDEBUG", "ASMFLAGS": "-UDEBUG", }, lib_source = "@com_github_wavm_wavm//:all", static_libraries = select({ "//conditions:default": [ "libWAVM.a", ], }), deps = [":llvm"], ) envoy_cmake_external( name = "zlib", cache_entries = { "BUILD_SHARED_LIBS": "off", "CMAKE_CXX_COMPILER_FORCED": "on", "CMAKE_C_COMPILER_FORCED": "on", "SKIP_BUILD_EXAMPLES": "on", # The following entries are for zlib-ng. Since zlib and zlib-ng are compatible source # codes and CMake ignores unknown cache entries, it is fine to combine it into one # dictionary. # # Reference: https://github.com/zlib-ng/zlib-ng#build-options. "ZLIB_COMPAT": "on", "ZLIB_ENABLE_TESTS": "off", # Warning: Turning WITH_OPTIM to "on" doesn't pass ZlibCompressorImplTest.CallingChecksum. "WITH_OPTIM": "on", # However turning off SSE4 fixes it. "WITH_SSE4": "off", # Warning: Turning WITH_NEW_STRATEGIES to "on" doesn't pass gzip compressor fuzz test. # Turning this off means falling into NO_QUICK_STRATEGY route. "WITH_NEW_STRATEGIES": "off", # Only allow aligned address. # Reference: https://github.com/zlib-ng/zlib-ng#advanced-build-options. 
"UNALIGNED_OK": "off", }, lib_source = select({ "//bazel:zlib_ng": "@com_github_zlib_ng_zlib_ng//:all", "//conditions:default": "@net_zlib//:all", }), static_libraries = select({ "//bazel:windows_x86_64": ["zlibstatic.lib"], "//conditions:default": ["libz.a"], }), ) ================================================ FILE: bazel/foreign_cc/llvm.patch ================================================ # Workaround for Envoy's CMAKE_BUILD_TYPE=Bazel. --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -247,7 +247,7 @@ string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE) if (CMAKE_BUILD_TYPE AND - NOT uppercase_CMAKE_BUILD_TYPE MATCHES "^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL)$") + NOT uppercase_CMAKE_BUILD_TYPE MATCHES "^(DEBUG|RELEASE|RELWITHDEBINFO|MINSIZEREL|BAZEL)$") message(FATAL_ERROR "Invalid value for CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") endif() # Workaround for a missing -fuse-ld flag in CXXFLAGS, which results in # different linkers being used during configure and compilation phases. --- a/cmake/modules/HandleLLVMOptions.cmake +++ b/cmake/modules/HandleLLVMOptions.cmake @@ -718,8 +718,6 @@ endif() if (UNIX AND CMAKE_GENERATOR STREQUAL "Ninja") include(CheckLinkerFlag) check_linker_flag("-Wl,--color-diagnostics" LINKER_SUPPORTS_COLOR_DIAGNOSTICS) - append_if(LINKER_SUPPORTS_COLOR_DIAGNOSTICS "-Wl,--color-diagnostics" - CMAKE_EXE_LINKER_FLAGS CMAKE_MODULE_LINKER_FLAGS CMAKE_SHARED_LINKER_FLAGS) endif() # Add flags for add_dead_strip(). ================================================ FILE: bazel/foreign_cc/luajit.patch ================================================ diff --git a/src/Makefile b/src/Makefile index f56465d..5d91fa7 100644 --- a/src/Makefile +++ b/src/Makefile @@ -27,7 +27,7 @@ NODOTABIVER= 51 DEFAULT_CC = gcc # # LuaJIT builds as a native 32 or 64 bit binary by default. -CC= $(DEFAULT_CC) +CC ?= $(DEFAULT_CC) # # Use this if you want to force a 32 bit build on a 64 bit multilib OS. 
#CC= $(DEFAULT_CC) -m32 @@ -71,10 +71,10 @@ CCWARN= -Wall # as dynamic mode. # # Mixed mode creates a static + dynamic library and a statically linked luajit. -BUILDMODE= mixed +#BUILDMODE= mixed # # Static mode creates a static library and a statically linked luajit. -#BUILDMODE= static +BUILDMODE= static # # Dynamic mode creates a dynamic library and a dynamically linked luajit. # Note: this executable will only run when the library is installed! @@ -99,7 +99,7 @@ XCFLAGS= # enabled by default. Some other features that *might* break some existing # code (e.g. __pairs or os.execute() return values) can be enabled here. # Note: this does not provide full compatibility with Lua 5.2 at this time. -#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT +XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT # # Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. #XCFLAGS+= -DLUAJIT_DISABLE_JIT @@ -111,7 +111,7 @@ XCFLAGS= #XCFLAGS+= -DLUAJIT_NUMMODE=2 # # Enable GC64 mode for x64. -#XCFLAGS+= -DLUAJIT_ENABLE_GC64 +XCFLAGS+= -DLUAJIT_ENABLE_GC64 # ############################################################################## @@ -587,7 +587,7 @@ endif Q= @ E= @echo -#Q= +Q= #E= @: ############################################################################## EOF --- a/src/msvcbuild.bat 2020-08-13 18:42:05.667354300 +0000 +++ b/src/msvcbuild.bat 2020-08-13 19:03:25.092297900 +0000 @@ -14,7 +14,7 @@ @if not defined INCLUDE goto :FAIL @setlocal -@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline +@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT @set LJLINK=link /nologo @set LJMT=mt /nologo @set LJLIB=lib /nologo /nodefaultlib @@ -25,7 +25,7 @@ @set LJLIBNAME=lua51.lib @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c -%LJCOMPILE% host\minilua.c +%LJCOMPILE% 
/O2 host\minilua.c @if errorlevel 1 goto :BAD %LJLINK% /out:minilua.exe minilua.obj @if errorlevel 1 goto :BAD @@ -48,7 +48,7 @@ minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC% @if errorlevel 1 goto :BAD -%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c +%LJCOMPILE% /O2 /I "." /I %DASMDIR% host\buildvm*.c @if errorlevel 1 goto :BAD %LJLINK% /out:buildvm.exe buildvm*.obj @if errorlevel 1 goto :BAD @@ -72,24 +72,35 @@ @if "%1" neq "debug" goto :NODEBUG @shift -@set LJCOMPILE=%LJCOMPILE% /Zi +@set LJCOMPILE=%LJCOMPILE% /O0 /Z7 @set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no +@set LJCRTDBG=d +@goto :ENDDEBUG :NODEBUG +@set LJCOMPILE=%LJCOMPILE% /O2 /Z7 +@set LJLINK=%LJLINK% /release /incremental:no +@set LJCRTDBG= +:ENDDEBUG @if "%1"=="amalg" goto :AMALGDLL @if "%1"=="static" goto :STATIC -%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c +@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% +%LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c @if errorlevel 1 goto :BAD %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj @if errorlevel 1 goto :BAD @goto :MTDLL :STATIC +@shift +@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG% %LJCOMPILE% lj_*.c lib_*.c @if errorlevel 1 goto :BAD %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj @if errorlevel 1 goto :BAD @goto :MTDLL :AMALGDLL -%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c +@shift +@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% +%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c @if errorlevel 1 goto :BAD %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj @if errorlevel 1 goto :BAD diff --git a/build.py b/build.py new file mode 100755 index 0000000..9c71271 --- /dev/null +++ b/build.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 + +import argparse +import os +import shutil + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--prefix") + args = parser.parse_args() + src_dir = os.path.dirname(os.path.realpath(__file__)) + shutil.copytree(src_dir, os.path.basename(src_dir)) + os.chdir(os.path.basename(src_dir)) + + 
os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.6" + os.environ["DEFAULT_CC"] = os.environ.get("CC", "") + os.environ["TARGET_CFLAGS"] = os.environ.get("CFLAGS", "") + " -fno-function-sections -fno-data-sections" + os.environ["TARGET_LDFLAGS"] = os.environ.get("CFLAGS", "") + " -fno-function-sections -fno-data-sections" + os.environ["CFLAGS"] = "" + # LuaJIT compile process build a tool `buildvm` and use it, building `buildvm` with ASAN + # will cause LSAN detect its leak and fail the build, set exitcode to 0 to make LSAN doesn't + # fail on it. + os.environ["LSAN_OPTIONS"] = "exitcode=0" + + if "ENVOY_MSAN" in os.environ: + os.environ["HOST_CFLAGS"] = "-fno-sanitize=memory" + os.environ["HOST_LDFLAGS"] = "-fno-sanitize=memory" + + # Remove LuaJIT from ASAN for now. + # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. + if "ENVOY_CONFIG_ASAN" in os.environ or "ENVOY_CONFIG_MSAN" in os.environ: + os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_luajit_luajit/clang-asan-blocklist.txt" % os.environ["PWD"] + with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") + + os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix)) + +def win_main(): + src_dir = os.path.dirname(os.path.realpath(__file__)) + dst_dir = os.getcwd() + "/luajit" + shutil.copytree(src_dir, os.path.basename(src_dir)) + os.chdir(os.path.basename(src_dir) + "/src") + os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static') + os.makedirs(dst_dir + "/lib", exist_ok=True) + shutil.copy("lua51.lib", dst_dir + "/lib") + os.makedirs(dst_dir + "/include/luajit-2.1", exist_ok=True) + for header in ["lauxlib.h", "luaconf.h", "lua.h", "lua.hpp", "luajit.h", "lualib.h"]: + shutil.copy(header, dst_dir + "/include/luajit-2.1") + os.makedirs(dst_dir + "/bin", exist_ok=True) + shutil.copy("luajit.exe", dst_dir + "/bin") + +if os.name == 'nt': + win_main() +else: + main() + 
================================================ FILE: bazel/foreign_cc/moonjit.patch ================================================ diff --git a/build.py b/build.py new file mode 100644 index 00000000..dab3606c --- /dev/null +++ b/build.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 + +import argparse +import os +import shutil + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--prefix") + args = parser.parse_args() + src_dir = os.path.dirname(os.path.realpath(__file__)) + shutil.copytree(src_dir, os.path.basename(src_dir)) + os.chdir(os.path.basename(src_dir)) + + os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.6" + os.environ["DEFAULT_CC"] = os.environ.get("CC", "") + os.environ["TARGET_CFLAGS"] = os.environ.get("CFLAGS", "") + " -fno-function-sections -fno-data-sections" + os.environ["TARGET_LDFLAGS"] = os.environ.get("CFLAGS", "") + " -fno-function-sections -fno-data-sections" + os.environ["CFLAGS"] = "" + # LuaJIT compile process build a tool `buildvm` and use it, building `buildvm` with ASAN + # will cause LSAN detect its leak and fail the build, set exitcode to 0 to make LSAN doesn't + # fail on it. + os.environ["LSAN_OPTIONS"] = "exitcode=0" + + if "ENVOY_MSAN" in os.environ: + os.environ["HOST_CFLAGS"] = "-fno-sanitize=memory" + os.environ["HOST_LDFLAGS"] = "-fno-sanitize=memory" + + # Remove LuaJIT from ASAN for now. + # TODO(htuch): Remove this when https://github.com/envoyproxy/envoy/issues/6084 is resolved. 
+ if "ENVOY_CONFIG_ASAN" in os.environ or "ENVOY_CONFIG_MSAN" in os.environ: + os.environ["TARGET_CFLAGS"] += " -fsanitize-blacklist=%s/com_github_moonjit_moonjit/clang-asan-blocklist.txt" % os.environ["PWD"] + with open("clang-asan-blocklist.txt", "w") as f: + f.write("fun:*\n") + + os.system('make -j{} V=1 PREFIX="{}" install'.format(os.cpu_count(), args.prefix)) + +def win_main(): + src_dir = os.path.dirname(os.path.realpath(__file__)) + dst_dir = os.getcwd() + "/moonjit" + shutil.copytree(src_dir, os.path.basename(src_dir)) + os.chdir(os.path.basename(src_dir) + "/src") + os.system('msvcbuild.bat gc64 ' + os.getenv('WINDOWS_DBG_BUILD', '') + ' static') + os.makedirs(dst_dir + "/lib", exist_ok=True) + shutil.copy("lua51.lib", dst_dir + "/lib") + os.makedirs(dst_dir + "/include/moonjit-2.2", exist_ok=True) + for header in ["lauxlib.h", "luaconf.h", "lua.h", "lua.hpp", "luajit.h", "lualib.h"]: + shutil.copy(header, dst_dir + "/include/moonjit-2.2") + os.makedirs(dst_dir + "/bin", exist_ok=True) + shutil.copy("luajit.exe", dst_dir + "/bin") + +if os.name == 'nt': + win_main() +else: + main() + diff --git a/src/Makefile b/src/Makefile index dad9aeec..e10b3118 100644 --- a/src/Makefile +++ b/src/Makefile @@ -27,7 +27,7 @@ NODOTABIVER= 51 DEFAULT_CC = gcc # # LuaJIT builds as a native 32 or 64 bit binary by default. -CC= $(DEFAULT_CC) +CC ?= $(DEFAULT_CC) # # Use this if you want to force a 32 bit build on a 64 bit multilib OS. #CC= $(DEFAULT_CC) -m32 @@ -71,10 +71,10 @@ CCWARN= -Wall # as dynamic mode. # # Mixed mode creates a static + dynamic library and a statically linked luajit. -BUILDMODE= mixed +#BUILDMODE= mixed # # Static mode creates a static library and a statically linked luajit. -#BUILDMODE= static +BUILDMODE= static # # Dynamic mode creates a dynamic library and a dynamically linked luajit. # Note: this executable will only run when the library is installed! @@ -99,7 +99,7 @@ XCFLAGS= # enabled by default. 
Some other features that *might* break some existing # code (e.g. __pairs or os.execute() return values) can be enabled here. # Note: this does not provide full compatibility with Lua 5.2 at this time. -#XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT +XCFLAGS+= -DLUAJIT_ENABLE_LUA52COMPAT # # Disable the JIT compiler, i.e. turn LuaJIT into a pure interpreter. #XCFLAGS+= -DLUAJIT_DISABLE_JIT @@ -612,7 +612,7 @@ endif Q= @ E= @echo -#Q= +Q= #E= @: ############################################################################## diff --git a/src/msvcbuild.bat b/src/msvcbuild.bat index c2d2c212..71f24422 100644 --- a/src/msvcbuild.bat +++ b/src/msvcbuild.bat @@ -15,7 +15,7 @@ @setlocal @rem Add more debug flags here, e.g. DEBUGCFLAGS=/DLUA_USE_APICHECK @set DEBUGCFLAGS= -@set LJCOMPILE=cl /nologo /c /O2 /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline +@set LJCOMPILE=cl /nologo /c /W3 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_STDIO_INLINE=__declspec(dllexport)__inline /DLUAJIT_ENABLE_LUA52COMPAT @set LJLINK=link /nologo @set LJMT=mt /nologo @set LJLIB=lib /nologo /nodefaultlib @@ -24,10 +24,9 @@ @set DASC=vm_x86.dasc @set LJDLLNAME=lua51.dll @set LJLIBNAME=lua51.lib -@set BUILDTYPE=release @set ALL_LIB=lib_base.c lib_math.c lib_bit.c lib_string.c lib_table.c lib_io.c lib_os.c lib_package.c lib_debug.c lib_jit.c lib_ffi.c lib_utf8.c -%LJCOMPILE% host\minilua.c +%LJCOMPILE% /O2 host\minilua.c @if errorlevel 1 goto :BAD %LJLINK% /out:minilua.exe minilua.obj @if errorlevel 1 goto :BAD @@ -50,7 +49,7 @@ if exist minilua.exe.manifest^ minilua %DASM% -LN %DASMFLAGS% -o host\buildvm_arch.h %DASC% @if errorlevel 1 goto :BAD -%LJCOMPILE% /I "." /I %DASMDIR% host\buildvm*.c +%LJCOMPILE% /O2 /I "." 
/I %DASMDIR% host\buildvm*.c @if errorlevel 1 goto :BAD %LJLINK% /out:buildvm.exe buildvm*.obj @if errorlevel 1 goto :BAD @@ -74,25 +73,35 @@ buildvm -m folddef -o lj_folddef.h lj_opt_fold.c @if "%1" neq "debug" goto :NODEBUG @shift -@set BUILDTYPE=debug -@set LJCOMPILE=%LJCOMPILE% /Zi %DEBUGCFLAGS% +@set LJCOMPILE=%LJCOMPILE% /O0 /Z7 +@set LJLINK=%LJLINK% /debug /opt:ref /opt:icf /incremental:no +@set LJCRTDBG=d +@goto :ENDDEBUG :NODEBUG -@set LJLINK=%LJLINK% /%BUILDTYPE% +@set LJCOMPILE=%LJCOMPILE% /O2 /Z7 +@set LJLINK=%LJLINK% /release /incremental:no +@set LJCRTDBG= +:ENDDEBUG @if "%1"=="amalg" goto :AMALGDLL @if "%1"=="static" goto :STATIC -%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL lj_*.c lib_*.c +@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% +LJCOMPILE% /DLUA_BUILD_AS_DLL lj_*.c lib_*.c @if errorlevel 1 goto :BAD %LJLINK% /DLL /out:%LJDLLNAME% lj_*.obj lib_*.obj @if errorlevel 1 goto :BAD @goto :MTDLL :STATIC +@shift +@set LJCOMPILE=%LJCOMPILE% /MT%LJCRTDBG% %LJCOMPILE% lj_*.c lib_*.c @if errorlevel 1 goto :BAD %LJLIB% /OUT:%LJLIBNAME% lj_*.obj lib_*.obj @if errorlevel 1 goto :BAD @goto :MTDLL :AMALGDLL -%LJCOMPILE% /MD /DLUA_BUILD_AS_DLL ljamalg.c +@shift +@set LJCOMPILE=%LJCOMPILE% /MD%LJCRTDBG% +%LJCOMPILE% /DLUA_BUILD_AS_DLL ljamalg.c @if errorlevel 1 goto :BAD %LJLINK% /DLL /out:%LJDLLNAME% ljamalg.obj lj_vm.obj @if errorlevel 1 goto :BAD ================================================ FILE: bazel/foreign_cc/nghttp2.patch ================================================ diff --git a/CMakeLists.txt b/CMakeLists.txt index 35c77d1d..47bd63f5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -273,7 +273,11 @@ check_type_size("ssize_t" SIZEOF_SSIZE_T) if(SIZEOF_SSIZE_T STREQUAL "") # ssize_t is a signed type in POSIX storing at least -1. # Set it to "int" to match the behavior of AC_TYPE_SSIZE_T (autotools). 
- set(ssize_t int) + if(WIN32 AND CMAKE_SIZEOF_VOID_P EQUAL 8) + set(ssize_t ptrdiff_t) + else() + set(ssize_t int) + endif() endif() # AC_TYPE_UINT8_T # AC_TYPE_UINT16_T # https://github.com/nghttp2/nghttp2/pull/1468 diff --git a/lib/nghttp2_buf.c b/lib/nghttp2_buf.c index 2a435bebf..92f97f7f2 100644 --- a/lib/nghttp2_buf.c +++ b/lib/nghttp2_buf.c @@ -82,8 +82,10 @@ void nghttp2_buf_reset(nghttp2_buf *buf) { } void nghttp2_buf_wrap_init(nghttp2_buf *buf, uint8_t *begin, size_t len) { - buf->begin = buf->pos = buf->last = buf->mark = begin; - buf->end = begin + len; + buf->begin = buf->pos = buf->last = buf->mark = buf->end = begin; + if (buf->end != NULL) { + buf->end += len; + } } static int buf_chain_new(nghttp2_buf_chain **chain, size_t chunk_length, diff --git a/lib/nghttp2_frame.c b/lib/nghttp2_frame.c index 4821de408..940c723b0 100644 --- a/lib/nghttp2_frame.c +++ b/lib/nghttp2_frame.c @@ -818,8 +818,10 @@ int nghttp2_frame_unpack_origin_payload(nghttp2_extension *frame, size_t len = 0; origin = frame->payload; - p = payload; - end = p + payloadlen; + p = end = payload; + if (end != NULL) { + end += payloadlen; + } for (; p != end;) { if (end - p < 2) { diff --git a/lib/nghttp2_session.c b/lib/nghttp2_session.c index 563ccd7de..794f141a1 100644 --- a/lib/nghttp2_session.c +++ b/lib/nghttp2_session.c @@ -5349,7 +5349,7 @@ static ssize_t inbound_frame_effective_readlen(nghttp2_inbound_frame *iframe, ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, size_t inlen) { - const uint8_t *first = in, *last = in + inlen; + const uint8_t *first = in, *last = in; nghttp2_inbound_frame *iframe = &session->iframe; size_t readlen; ssize_t padlen; @@ -5360,6 +5360,10 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, size_t pri_fieldlen; nghttp2_mem *mem; + if (in != NULL) { + last += inlen; + } + DEBUGF("recv: connection recv_window_size=%d, local_window=%d\n", session->recv_window_size, session->local_window_size); 
@@ -5389,7 +5393,9 @@ ssize_t nghttp2_session_mem_recv(nghttp2_session *session, const uint8_t *in, } iframe->payloadleft -= readlen; - in += readlen; + if (in != NULL) { + in += readlen; + } if (iframe->payloadleft == 0) { session_inbound_frame_reset(session); ================================================ FILE: bazel/foreign_cc/zlib.patch ================================================ diff --git a/CMakeLists.txt b/CMakeLists.txt index 0fe939d..2f0475a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -229,21 +229,22 @@ endif() #============================================================================ # Example binaries #============================================================================ - -add_executable(example test/example.c) -target_link_libraries(example zlib) -add_test(example example) - -add_executable(minigzip test/minigzip.c) -target_link_libraries(minigzip zlib) - -if(HAVE_OFF64_T) - add_executable(example64 test/example.c) - target_link_libraries(example64 zlib) - set_target_properties(example64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") - add_test(example64 example64) - - add_executable(minigzip64 test/minigzip.c) - target_link_libraries(minigzip64 zlib) - set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") +if(NOT SKIP_BUILD_EXAMPLES) + add_executable(example test/example.c) + target_link_libraries(example zlib) + add_test(example example) + + add_executable(minigzip test/minigzip.c) + target_link_libraries(minigzip zlib) + + if(HAVE_OFF64_T) + add_executable(example64 test/example.c) + target_link_libraries(example64 zlib) + set_target_properties(example64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") + add_test(example64 example64) + + add_executable(minigzip64 test/minigzip.c) + target_link_libraries(minigzip64 zlib) + set_target_properties(minigzip64 PROPERTIES COMPILE_FLAGS "-D_FILE_OFFSET_BITS=64") + endif() endif() ================================================ FILE: 
bazel/gen_sh_test_runner.sh ================================================ #!/bin/bash # Used in a genrule to wrap sh_test script for execution in # //test/coverage:coverage_tests single binary. # Do not generate test suites for empty source files. if [ -z "$1" ]; then exit 0 fi RAW_TEST_NAME="$(basename "$1")" # Normalize to something we can use in a TEST(ShTest, ...) name TEST_NAME="${RAW_TEST_NAME//./_}" EXEC_ARGS="\"$1\"" shift for a in "$@" do EXEC_ARGS="${EXEC_ARGS}, \"$a\"" done ( cat << EOF #include "test/test_common/environment.h" #include "gtest/gtest.h" TEST(ShTest, ${TEST_NAME}) { Envoy::TestEnvironment::exec({${EXEC_ARGS}}); } EOF ) ================================================ FILE: bazel/genrule_repository.bzl ================================================ def _genrule_repository(ctx): ctx.download_and_extract( ctx.attr.urls, "", # output ctx.attr.sha256, "", # type ctx.attr.strip_prefix, ) for ii, patch in enumerate(ctx.attr.patches): patch_input = "patch-input-%d.patch" % (ii,) ctx.symlink(patch, patch_input) patch_result = ctx.execute(["patch", "-p0", "--input", patch_input]) if patch_result.return_code != 0: fail("Failed to apply patch %r: %s" % (patch, patch_result.stderr)) # https://github.com/bazelbuild/bazel/issues/3766 genrule_cmd_file = Label("@envoy//bazel").relative(str(ctx.attr.genrule_cmd_file)) ctx.symlink(genrule_cmd_file, "_envoy_genrule_cmd.genrule_cmd") cat_genrule_cmd = ctx.execute(["cat", "_envoy_genrule_cmd.genrule_cmd"]) if cat_genrule_cmd.return_code != 0: fail("Failed to read genrule command %r: %s" % ( genrule_cmd_file, cat_genrule_cmd.stderr, )) ctx.file("WORKSPACE", "workspace(name=%r)" % (ctx.name,)) ctx.symlink(ctx.attr.build_file, "BUILD.bazel") # Inject the genrule_cmd content into a .bzl file that can be loaded # from the repository BUILD file. We force the user to look up the # command content "by label" so the inclusion source is obvious. 
ctx.file("genrule_cmd.bzl", """ _GENRULE_CMD = {%r: %r} def genrule_cmd(label): return _GENRULE_CMD[label] """ % (str(genrule_cmd_file), cat_genrule_cmd.stdout)) genrule_repository = repository_rule( attrs = { "urls": attr.string_list( mandatory = True, allow_empty = False, ), "sha256": attr.string(), "strip_prefix": attr.string(), "patches": attr.label_list( allow_files = [".patch"], allow_empty = True, ), "genrule_cmd_file": attr.label( mandatory = True, allow_single_file = [".genrule_cmd"], ), "build_file": attr.label( mandatory = True, allow_single_file = [".BUILD"], ), }, implementation = _genrule_repository, ) def _genrule_cc_deps(ctx): outs = depset() for dep in ctx.attr.deps: outs = dep.cc.transitive_headers + dep.cc.libs + outs return DefaultInfo(files = outs) genrule_cc_deps = rule( attrs = { "deps": attr.label_list( providers = [], # CcStarlarkApiProvider mandatory = True, allow_empty = False, ), }, implementation = _genrule_cc_deps, ) def _absolute_bin(path): # If the binary path looks like it's relative to the current directory, # transform it to be absolute by appending "${PWD}". if "/" in path and not path.startswith("/"): return '"${PWD}"/%r' % (path,) return "%r" % (path,) def _genrule_environment(ctx): lines = [] # Bazel uses the same command for C and C++ compilation. c_compiler = ctx.var["CC"] # Bare minimum cflags to get included test binaries to link. # # See .bazelrc for the full set. asan_flags = ["-fsanitize=address,undefined"] tsan_flags = ["-fsanitize=thread"] # Older versions of GCC in Ubuntu, including GCC 5 used in CI images, # incorrectly invoke the older `/usr/bin/ld` with gold-specific options when # building with sanitizers enabled. Work around this by forcing use of gold # in sanitize mode. # # This is not a great solution because it doesn't detect GCC when Bazel has # wrapped it in an intermediate script, but it works well enough to keep CI # running. 
# # https://stackoverflow.com/questions/37603238/fsanitize-not-using-gold-linker-in-gcc-6-1 force_ld = [] if "clang" in c_compiler: force_ld = ["-fuse-ld=lld"] elif "gcc" in c_compiler or "g++" in c_compiler: force_ld = ["-fuse-ld=gold"] cc_flags = [] ld_flags = [] ld_libs = [] if ctx.var.get("ENVOY_CONFIG_COVERAGE"): ld_libs.append("-lgcov") if ctx.var.get("ENVOY_CONFIG_ASAN"): cc_flags += asan_flags ld_flags += asan_flags ld_flags += force_ld if ctx.var.get("ENVOY_CONFIG_TSAN"): cc_flags += tsan_flags ld_flags += tsan_flags ld_flags += force_ld lines.append("export CFLAGS=%r" % (" ".join(cc_flags),)) lines.append("export LDFLAGS=%r" % (" ".join(ld_flags),)) lines.append("export LIBS=%r" % (" ".join(ld_libs),)) lines.append("export CC=%s" % (_absolute_bin(c_compiler),)) lines.append("export CXX=%s" % (_absolute_bin(c_compiler),)) # Some Autoconf helper binaries leak, which makes ./configure think the # system is unable to do anything. Turn off leak checking during part of # the build. lines.append("export ASAN_OPTIONS=detect_leaks=0") lines.append("") out = ctx.actions.declare_file(ctx.attr.name + ".sh") ctx.actions.write(out, "\n".join(lines)) return DefaultInfo(files = depset([out])) genrule_environment = rule( implementation = _genrule_environment, ) ================================================ FILE: bazel/get_workspace_status ================================================ #!/bin/bash # This file was imported from https://github.com/bazelbuild/bazel at d6fec93. # This script will be run bazel when building process starts to # generate key-value information that represents the status of the # workspace. The output should be like # # KEY1 VALUE1 # KEY2 VALUE2 # # If the script exits with non-zero code, it's considered as a failure # and the output will be discarded. # For Envoy in particular, we want to force binaries to relink when the Git # SHA changes (https://github.com/envoyproxy/envoy/issues/2551). This can be # done by prefixing keys with "STABLE_". 
To avoid breaking compatibility with
# other status scripts, this one still echoes the non-stable ("volatile") names.
- const auto handle = dlopen(shared_library, RTLD_NOW | RTLD_LOCAL); + const auto handle = dlopen(shared_library, RTLD_NOW | RTLD_LOCAL +#if defined(__has_feature) +#if __has_feature(address_sanitizer) + | RTLD_NODELETE +#endif +#endif + ); if (handle == nullptr) { error_message = dlerror(); return make_unexpected(dynamic_load_failure_error); # commit 3a6f049c123a1906c7381e824292c18fd8698293 # Author: Christian Neumüller # Date: Wed Feb 27 01:48:17 2019 +0100 # # Fix MSVC compiler flags. (#104) # # * All debug specific flags would be replaced by release specific on MSVC. # * The OPENTRACING_STATIC flag would be missing from OpenTracingConfig.cmake when linking against OpenTracing::opentracing-static # diff --git a/CMakeLists.txt b/CMakeLists.txt index 1721fb3..3873b3a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -52,7 +52,7 @@ if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra") elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_RELEASE} -D_SCL_SECURE_NO_WARNINGS") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_SCL_SECURE_NO_WARNINGS") endif() # ============================================================================== ================================================ FILE: bazel/protobuf.patch ================================================ # https://github.com/protocolbuffers/protobuf/pull/6720 diff --git a/third_party/BUILD b/third_party/BUILD new file mode 100644 index 0000000000..b66101a39a --- /dev/null +++ b/third_party/BUILD @@ -0,0 +1 @@ +exports_files(["six.BUILD", "zlib.BUILD"]) # https://github.com/protocolbuffers/protobuf/pull/6896 diff --git a/src/google/protobuf/stubs/strutil.cc b/src/google/protobuf/stubs/strutil.cc index 62b3f0a871..bb3df47ccf 100644 --- a/src/google/protobuf/stubs/strutil.cc +++ b/src/google/protobuf/stubs/strutil.cc @@ -1435,32 +1435,44 @@ AlphaNum::AlphaNum(strings::Hex 
hex) { // after the area just overwritten. It comes in multiple flavors to minimize // call overhead. static char *Append1(char *out, const AlphaNum &x) { - memcpy(out, x.data(), x.size()); - return out + x.size(); + if (x.size() > 0) { + memcpy(out, x.data(), x.size()); + out += x.size(); + } + return out; } static char *Append2(char *out, const AlphaNum &x1, const AlphaNum &x2) { - memcpy(out, x1.data(), x1.size()); - out += x1.size(); - - memcpy(out, x2.data(), x2.size()); - return out + x2.size(); + if (x1.size() > 0) { + memcpy(out, x1.data(), x1.size()); + out += x1.size(); + } + if (x2.size() > 0) { + memcpy(out, x2.data(), x2.size()); + out += x2.size(); + } + return out; } -static char *Append4(char *out, - const AlphaNum &x1, const AlphaNum &x2, +static char *Append4(char *out, const AlphaNum &x1, const AlphaNum &x2, const AlphaNum &x3, const AlphaNum &x4) { - memcpy(out, x1.data(), x1.size()); - out += x1.size(); - - memcpy(out, x2.data(), x2.size()); - out += x2.size(); - - memcpy(out, x3.data(), x3.size()); - out += x3.size(); - - memcpy(out, x4.data(), x4.size()); - return out + x4.size(); + if (x1.size() > 0) { + memcpy(out, x1.data(), x1.size()); + out += x1.size(); + } + if (x2.size() > 0) { + memcpy(out, x2.data(), x2.size()); + out += x2.size(); + } + if (x3.size() > 0) { + memcpy(out, x3.data(), x3.size()); + out += x3.size(); + } + if (x4.size() > 0) { + memcpy(out, x4.data(), x4.size()); + out += x4.size(); + } + return out; } string StrCat(const AlphaNum &a, const AlphaNum &b) { # patching for zlib binding diff --git a/BUILD b/BUILD index efc3d8e7f..746ad4851 100644 --- a/BUILD +++ b/BUILD @@ -24,7 +24,7 @@ config_setting( # ZLIB configuration ################################################################################ -ZLIB_DEPS = ["@zlib//:zlib"] +ZLIB_DEPS = ["//external:zlib"] ################################################################################ # Protobuf Runtime Library diff --git a/protobuf.bzl b/protobuf.bzl index 
5fa5543b1..484bc41a7 100644 --- a/protobuf.bzl +++ b/protobuf.bzl @@ -75,18 +75,17 @@ def _RelativeOutputPath(path, include, dest = ""): def _proto_gen_impl(ctx): """General implementation for generating protos""" srcs = ctx.files.srcs - deps = [] - deps += ctx.files.srcs + deps = depset(direct=ctx.files.srcs) source_dir = _SourceDir(ctx) gen_dir = _GenDir(ctx).rstrip("/") if source_dir: - import_flags = ["-I" + source_dir, "-I" + gen_dir] + import_flags = depset(direct=["-I" + source_dir, "-I" + gen_dir]) else: - import_flags = ["-I."] + import_flags = depset(direct=["-I."]) for dep in ctx.attr.deps: - import_flags += dep.proto.import_flags - deps += dep.proto.deps + import_flags = depset(transitive=[import_flags, dep.proto.import_flags]) + deps = depset(transitive=[deps, dep.proto.deps]) if not ctx.attr.gen_cc and not ctx.attr.gen_py and not ctx.executable.plugin: return struct( @@ -103,7 +102,7 @@ def _proto_gen_impl(ctx): in_gen_dir = src.root.path == gen_dir if in_gen_dir: import_flags_real = [] - for f in depset(import_flags).to_list(): + for f in import_flags.to_list(): path = f.replace("-I", "") import_flags_real.append("-I$(realpath -s %s)" % path) @@ -118,7 +117,7 @@ def _proto_gen_impl(ctx): outs.extend(_PyOuts([src.basename], use_grpc_plugin = use_grpc_plugin)) outs = [ctx.actions.declare_file(out, sibling = src) for out in outs] - inputs = [src] + deps + inputs = [src] + deps.to_list() tools = [ctx.executable.protoc] if ctx.executable.plugin: plugin = ctx.executable.plugin @@ -141,7 +140,7 @@ def _proto_gen_impl(ctx): inputs = inputs, tools = tools, outputs = outs, - arguments = args + import_flags + [src.path], + arguments = args + import_flags.to_list() + [src.path], executable = ctx.executable.protoc, mnemonic = "ProtoCompile", use_default_shell_env = True, ================================================ FILE: bazel/repositories.bzl ================================================ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") 
load(":dev_binding.bzl", "envoy_dev_binding") load(":genrule_repository.bzl", "genrule_repository") load("@envoy_api//bazel:envoy_http_archive.bzl", "envoy_http_archive") load(":repository_locations.bzl", "DEPENDENCY_ANNOTATIONS", "DEPENDENCY_REPOSITORIES", "USE_CATEGORIES", "USE_CATEGORIES_WITH_CPE_OPTIONAL") load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language") load(":crates.bzl", "raze_fetch_remote_crates") PPC_SKIP_TARGETS = ["envoy.filters.http.lua"] WINDOWS_SKIP_TARGETS = [ "envoy.tracers.dynamic_ot", "envoy.tracers.lightstep", "envoy.tracers.datadog", "envoy.tracers.opencensus", "envoy.watchdog.abort_action", ] # Make all contents of an external repository accessible under a filegroup. Used for external HTTP # archives, e.g. cares. BUILD_ALL_CONTENT = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])""" def _build_all_content(exclude = []): return """filegroup(name = "all", srcs = glob(["**"], exclude={}), visibility = ["//visibility:public"])""".format(repr(exclude)) def _fail_missing_attribute(attr, key): fail("The '%s' attribute must be defined for external dependecy " % attr + key) # Method for verifying content of the DEPENDENCY_REPOSITORIES defined in bazel/repository_locations.bzl # Verification is here so that bazel/repository_locations.bzl can be loaded into other tools written in Python, # and as such needs to be free of bazel specific constructs. # # We also remove the attributes for further consumption in this file, since rules such as http_archive # don't recognize them. 
# Validate DEPENDENCY_REPOSITORIES and strip annotation attributes (which rules
# like http_archive do not recognize). Returns {name: cleaned location dict}.
# Calls fail() on the first malformed entry.
def _repository_locations():
    locations = {}
    for key, location in DEPENDENCY_REPOSITORIES.items():
        mutable_location = dict(location)
        locations[key] = mutable_location

        if "sha256" not in location or len(location["sha256"]) == 0:
            _fail_missing_attribute("sha256", key)

        if "project_name" not in location:
            _fail_missing_attribute("project_name", key)
        mutable_location.pop("project_name")

        if "project_desc" not in location:
            _fail_missing_attribute("project_desc", key)
        mutable_location.pop("project_desc")

        if "project_url" not in location:
            _fail_missing_attribute("project_url", key)
        project_url = mutable_location.pop("project_url")
        if not project_url.startswith("https://") and not project_url.startswith("http://"):
            fail("project_url must start with https:// or http://: " + project_url)

        if "version" not in location:
            _fail_missing_attribute("version", key)
        mutable_location.pop("version")

        if "use_category" not in location:
            _fail_missing_attribute("use_category", key)
        use_category = mutable_location.pop("use_category")

        # Dataplane/observability extensions must declare which extensions use them.
        if "dataplane_ext" in use_category or "observability_ext" in use_category:
            if "extensions" not in location:
                _fail_missing_attribute("extensions", key)
            mutable_location.pop("extensions")

        if "last_updated" not in location:
            _fail_missing_attribute("last_updated", key)
        last_updated = mutable_location.pop("last_updated")

        # Starlark doesn't have regexes. Dashes at positions 4 and 7 means the
        # format being enforced is YYYY-MM-DD (the previous message said
        # "YYYY-DD-MM", which contradicted the check).
        if len(last_updated) != 10 or last_updated[4] != "-" or last_updated[7] != "-":
            fail("last_updated must match YYYY-MM-DD: " + last_updated)

        if "cpe" in location:
            cpe = mutable_location.pop("cpe")

            # Starlark doesn't have regexes.
            # NOTE(review): due to operator precedence this reads as
            # "not prefix-ok or (not suffix-ok and field-count != 6)" — confirm
            # this is the intended acceptance condition.
            cpe_matches = (cpe != "N/A" and (not cpe.startswith("cpe:2.3:a:") or not cpe.endswith(":*") and len(cpe.split(":")) != 6))
            if cpe_matches:
                fail("CPE must match cpe:2.3:a:<vendor>:<product>:*: " + cpe)
        elif not [category for category in USE_CATEGORIES_WITH_CPE_OPTIONAL if category in location["use_category"]]:
            # CPE is mandatory unless every use is in a CPE-optional category.
            _fail_missing_attribute("cpe", key)

        for category in location["use_category"]:
            if category not in USE_CATEGORIES:
                fail("Unknown use_category value '" + category + "' for dependency " + key)

    return locations

REPOSITORY_LOCATIONS = _repository_locations()

# To initialize http_archive REPOSITORY_LOCATIONS dictionaries must be stripped of annotations.
# See repository_locations.bzl for the list of annotation attributes.
def _get_location(dependency):
    stripped = dict(REPOSITORY_LOCATIONS[dependency])
    for attribute in DEPENDENCY_ANNOTATIONS:
        stripped.pop(attribute, None)
    return stripped

# Declare an external repository whose URL/sha256 come from REPOSITORY_LOCATIONS.
def _repository_impl(name, **kwargs):
    envoy_http_archive(
        name,
        locations = REPOSITORY_LOCATIONS,
        **kwargs
    )

# Repository rule body for @envoy_build_config: an empty workspace that only
# exposes the extensions build configuration.
def _default_envoy_build_config_impl(ctx):
    ctx.file("WORKSPACE", "")
    ctx.file("BUILD.bazel", "")
    ctx.symlink(ctx.attr.config, "extensions_build_config.bzl")

_default_envoy_build_config = repository_rule(
    implementation = _default_envoy_build_config_impl,
    attrs = {
        "config": attr.label(default = "@envoy//source/extensions:extensions_build_config.bzl"),
    },
)

# Python dependencies.
def _python_deps():
    # TODO(htuch): convert these to pip3_import.
    _repository_impl(
        name = "com_github_twitter_common_lang",
        build_file = "@envoy//bazel/external:twitter_common_lang.BUILD",
    )
    _repository_impl(
        name = "com_github_twitter_common_rpc",
        build_file = "@envoy//bazel/external:twitter_common_rpc.BUILD",
    )
    _repository_impl(
        name = "com_github_twitter_common_finagle_thrift",
        build_file = "@envoy//bazel/external:twitter_common_finagle_thrift.BUILD",
    )
    _repository_impl(
        name = "six",
        build_file = "@com_google_protobuf//third_party:six.BUILD",
    )

# Bazel native C++ dependencies.
# For dependencies that don't provide autoconf/automake builds.
def _cc_deps():
    _repository_impl("grpc_httpjson_transcoding")
    native.bind(
        name = "path_matcher",
        actual = "@grpc_httpjson_transcoding//src:path_matcher",
    )
    native.bind(
        name = "grpc_transcoding",
        actual = "@grpc_httpjson_transcoding//src:transcoding",
    )

def _go_deps(skip_targets):
    # Keep the skip_targets check around until Istio Proxy has stopped using
    # it to exclude the Go rules.
    if "io_bazel_rules_go" not in skip_targets:
        _repository_impl(
            name = "io_bazel_rules_go",
            # TODO(wrowe, sunjayBhatia): remove when Windows RBE supports batch file invocation
            patch_args = ["-p1"],
            patches = ["@envoy//bazel:rules_go.patch"],
        )
        _repository_impl("bazel_gazelle")

def _rust_deps():
    _repository_impl("io_bazel_rules_rust")
    raze_fetch_remote_crates()

# Main entry point: declares every external repository Envoy needs.
def envoy_dependencies(skip_targets = []):
    # Setup Envoy developer tools.
    envoy_dev_binding()

    # Treat Envoy's overall build config as an external repo, so projects that
    # build Envoy as a subcomponent can easily override the config.
    if "envoy_build_config" not in native.existing_rules().keys():
        _default_envoy_build_config(name = "envoy_build_config")

    # Setup external Bazel rules
    _foreign_cc_dependencies()

    # Binding to an alias pointing to the selected version of BoringSSL:
    # - BoringSSL FIPS from @boringssl_fips//:ssl,
    # - non-FIPS BoringSSL from @boringssl//:ssl.
    _boringssl()
    _boringssl_fips()
    native.bind(
        name = "ssl",
        actual = "@envoy//bazel:boringssl",
    )

    # The long repo names (`com_github_fmtlib_fmt` instead of `fmtlib`) are
    # semi-standard in the Bazel community, intended to avoid both duplicate
    # dependencies and name conflicts.
    _com_github_c_ares_c_ares()
    _com_github_circonus_labs_libcircllhist()
    _com_github_cyan4973_xxhash()
    _com_github_datadog_dd_opentracing_cpp()
    _com_github_mirror_tclap()
    _com_github_envoyproxy_sqlparser()
    _com_github_fmtlib_fmt()
    _com_github_gabime_spdlog()
    _com_github_google_benchmark()
    _com_github_google_jwt_verify()
    _com_github_google_libprotobuf_mutator()
    _com_github_google_tcmalloc()
    _com_github_gperftools_gperftools()
    _com_github_grpc_grpc()
    _com_github_jbeder_yaml_cpp()
    _com_github_libevent_libevent()
    _com_github_luajit_luajit()
    _com_github_moonjit_moonjit()
    _com_github_nghttp2_nghttp2()
    _com_github_nodejs_http_parser()
    _com_github_tencent_rapidjson()
    _com_google_absl()
    _com_google_googletest()
    _com_google_protobuf()
    _io_opencensus_cpp()
    _com_github_curl()
    # NOTE(review): a second _com_github_envoyproxy_sqlparser() call was removed
    # here; it is already invoked in the alphabetical list above and a repeated
    # native.bind("sqlparser") would be rejected by Bazel.
    _com_googlesource_chromium_v8()
    _com_googlesource_quiche()
    _com_googlesource_googleurl()
    _com_lightstep_tracer_cpp()
    _io_opentracing_cpp()
    _net_zlib()
    _com_github_zlib_ng_zlib_ng()
    _upb()
    _proxy_wasm_cpp_sdk()
    _proxy_wasm_cpp_host()
    _emscripten_toolchain()
    _repository_impl("com_googlesource_code_re2")
    _com_google_cel_cpp()
    _repository_impl("com_github_google_flatbuffers")
    _repository_impl("bazel_toolchains")
    _repository_impl("bazel_compdb")
    _repository_impl("envoy_build_tools")
    _repository_impl("rules_cc")

    # Unconditional, since we use this only for compiler-agnostic fuzzing utils.
    _org_llvm_releases_compiler_rt()

    _python_deps()
    _cc_deps()
    _go_deps(skip_targets)
    _rust_deps()
    _kafka_deps()

    _org_llvm_llvm()
    _com_github_wavm_wavm()

    switched_rules_by_language(
        name = "com_google_googleapis_imports",
        cc = True,
        go = True,
        grpc = True,
        rules_override = {
            "py_proto_library": "@envoy_api//bazel:api_build_system.bzl",
        },
    )
    native.bind(
        name = "bazel_runfiles",
        actual = "@bazel_tools//tools/cpp/runfiles",
    )

def _boringssl():
    _repository_impl(
        name = "boringssl",
        patch_args = ["-p1"],
        patches = ["@envoy//bazel:boringssl_static.patch"],
    )

def _boringssl_fips():
    location = REPOSITORY_LOCATIONS["boringssl_fips"]
    genrule_repository(
        name = "boringssl_fips",
        urls = location["urls"],
        sha256 = location["sha256"],
        genrule_cmd_file = "@envoy//bazel/external:boringssl_fips.genrule_cmd",
        build_file = "@envoy//bazel/external:boringssl_fips.BUILD",
        patches = ["@envoy//bazel/external:boringssl_fips.patch"],
    )

def _com_github_circonus_labs_libcircllhist():
    _repository_impl(
        name = "com_github_circonus_labs_libcircllhist",
        build_file = "@envoy//bazel/external:libcircllhist.BUILD",
    )
    native.bind(
        name = "libcircllhist",
        actual = "@com_github_circonus_labs_libcircllhist//:libcircllhist",
    )

def _com_github_c_ares_c_ares():
    location = _get_location("com_github_c_ares_c_ares")
    http_archive(
        name = "com_github_c_ares_c_ares",
        build_file_content = BUILD_ALL_CONTENT,
        **location
    )
    native.bind(
        name = "ares",
        actual = "@envoy//bazel/foreign_cc:ares",
    )

def _com_github_cyan4973_xxhash():
    _repository_impl(
        name = "com_github_cyan4973_xxhash",
        build_file = "@envoy//bazel/external:xxhash.BUILD",
    )
    native.bind(
        name = "xxhash",
        actual = "@com_github_cyan4973_xxhash//:xxhash",
    )

def _com_github_envoyproxy_sqlparser():
    _repository_impl(
        name = "com_github_envoyproxy_sqlparser",
        build_file = "@envoy//bazel/external:sqlparser.BUILD",
    )
    native.bind(
        name = "sqlparser",
        actual = "@com_github_envoyproxy_sqlparser//:sqlparser",
    )

def _com_github_mirror_tclap():
    _repository_impl(
        name = "com_github_mirror_tclap",
        build_file = "@envoy//bazel/external:tclap.BUILD",
        patch_args = ["-p1"],
        # If and when we pick up tclap 1.4 or later release,
        # this entire issue was refactored away 6 years ago;
        # https://sourceforge.net/p/tclap/code/ci/5d4ffbf2db794af799b8c5727fb6c65c079195ac/
        # https://github.com/envoyproxy/envoy/pull/8572#discussion_r337554195
        patches = ["@envoy//bazel:tclap-win64-ull-sizet.patch"],
    )
    native.bind(
        name = "tclap",
        actual = "@com_github_mirror_tclap//:tclap",
    )

def _com_github_fmtlib_fmt():
    _repository_impl(
        name = "com_github_fmtlib_fmt",
        build_file = "@envoy//bazel/external:fmtlib.BUILD",
    )
    native.bind(
        name = "fmtlib",
        actual = "@com_github_fmtlib_fmt//:fmtlib",
    )

def _com_github_gabime_spdlog():
    _repository_impl(
        name = "com_github_gabime_spdlog",
        build_file = "@envoy//bazel/external:spdlog.BUILD",
    )
    native.bind(
        name = "spdlog",
        actual = "@com_github_gabime_spdlog//:spdlog",
    )

def _com_github_google_benchmark():
    location = _get_location("com_github_google_benchmark")
    http_archive(
        name = "com_github_google_benchmark",
        **location
    )
    native.bind(
        name = "benchmark",
        actual = "@com_github_google_benchmark//:benchmark",
    )

def _com_github_google_libprotobuf_mutator():
    _repository_impl(
        name = "com_github_google_libprotobuf_mutator",
        build_file = "@envoy//bazel/external:libprotobuf_mutator.BUILD",
    )

def _com_github_jbeder_yaml_cpp():
    _repository_impl(
        name = "com_github_jbeder_yaml_cpp",
    )
    native.bind(
        name = "yaml_cpp",
        actual = "@com_github_jbeder_yaml_cpp//:yaml-cpp",
    )

def _com_github_libevent_libevent():
    location = _get_location("com_github_libevent_libevent")
    http_archive(
        name = "com_github_libevent_libevent",
        build_file_content = BUILD_ALL_CONTENT,
        **location
    )
    native.bind(
        name = "event",
        actual = "@envoy//bazel/foreign_cc:event",
    )

def _net_zlib():
    _repository_impl(
        name = "net_zlib",
        build_file_content = BUILD_ALL_CONTENT,
        patch_args = ["-p1"],
        patches = ["@envoy//bazel/foreign_cc:zlib.patch"],
    )
    native.bind(
        name = "zlib",
        actual = "@envoy//bazel/foreign_cc:zlib",
    )

    # Bind for grpc.
    native.bind(
        name = "madler_zlib",
        actual = "@envoy//bazel/foreign_cc:zlib",
    )

def _com_github_zlib_ng_zlib_ng():
    _repository_impl(
        name = "com_github_zlib_ng_zlib_ng",
        build_file_content = BUILD_ALL_CONTENT,
    )

def _com_google_cel_cpp():
    _repository_impl("com_google_cel_cpp")

    # Parser dependencies.
    # NOTE(review): a second, hardcoded pair of http_archive definitions for
    # "rules_antlr" and "antlr4_runtimes" (pinned to rules_antlr 3cc2f950 and
    # antlr4 4.7.2) was removed here. Bazel rejects a repository declared twice
    # under the same name, so only these first, repository_locations-driven
    # definitions could ever take effect.
    _repository_impl("rules_antlr")
    location = _get_location("antlr4_runtimes")
    http_archive(
        name = "antlr4_runtimes",
        build_file_content = """
package(default_visibility = ["//visibility:public"])
cc_library(
    name = "cpp",
    srcs = glob(["runtime/Cpp/runtime/src/**/*.cpp"]),
    hdrs = glob(["runtime/Cpp/runtime/src/**/*.h"]),
    includes = ["runtime/Cpp/runtime/src"],
)
""",
        patch_args = ["-p1"],
        # Patches ASAN violation of initialization fiasco
        patches = ["@envoy//bazel:antlr.patch"],
        **location
    )

def _com_github_nghttp2_nghttp2():
    location = _get_location("com_github_nghttp2_nghttp2")
    http_archive(
        name = "com_github_nghttp2_nghttp2",
        build_file_content = BUILD_ALL_CONTENT,
        patch_args = ["-p1"],
        # This patch cannot be picked up due to ABI rules. Better
        # solve is likely at the next version-major. Discussion at;
        # https://github.com/nghttp2/nghttp2/pull/1395
        # https://github.com/envoyproxy/envoy/pull/8572#discussion_r334067786
        patches = ["@envoy//bazel/foreign_cc:nghttp2.patch"],
        **location
    )
    native.bind(
        name = "nghttp2",
        actual = "@envoy//bazel/foreign_cc:nghttp2",
    )

def _io_opentracing_cpp():
    _repository_impl(
        name = "io_opentracing_cpp",
        patch_args = ["-p1"],
        # Workaround for LSAN false positive in https://github.com/envoyproxy/envoy/issues/7647
        patches = ["@envoy//bazel:io_opentracing_cpp.patch"],
    )
    native.bind(
        name = "opentracing",
        actual = "@io_opentracing_cpp//:opentracing",
    )

def _com_lightstep_tracer_cpp():
    _repository_impl("com_lightstep_tracer_cpp")
    native.bind(
        name = "lightstep",
        actual = "@com_lightstep_tracer_cpp//:manual_tracer_lib",
    )

def _com_github_datadog_dd_opentracing_cpp():
    _repository_impl("com_github_datadog_dd_opentracing_cpp")
    _repository_impl(
        name = "com_github_msgpack_msgpack_c",
        build_file = "@com_github_datadog_dd_opentracing_cpp//:bazel/external/msgpack.BUILD",
    )
    native.bind(
        name = "dd_opentracing_cpp",
        actual = "@com_github_datadog_dd_opentracing_cpp//:dd_opentracing_cpp",
    )

def _com_github_tencent_rapidjson():
    _repository_impl(
        name = "com_github_tencent_rapidjson",
        build_file = "@envoy//bazel/external:rapidjson.BUILD",
    )
    native.bind(
        name = "rapidjson",
        actual = "@com_github_tencent_rapidjson//:rapidjson",
    )

def _com_github_nodejs_http_parser():
    _repository_impl(
        name = "com_github_nodejs_http_parser",
        build_file = "@envoy//bazel/external:http-parser.BUILD",
    )
    native.bind(
        name = "http_parser",
        actual = "@com_github_nodejs_http_parser//:http_parser",
    )

def _com_google_googletest():
    _repository_impl("com_google_googletest")
    native.bind(
        name = "googletest",
        actual = "@com_google_googletest//:gtest",
    )

# TODO(jmarantz): replace the use of bind and external_deps with just
# the direct Bazel path at all sites. This will make it easier to
# pull in more bits of abseil as needed, and is now the preferred
# method for pure Bazel deps.
def _com_google_absl():
    _repository_impl("com_google_absl")
    native.bind(
        name = "abseil_any",
        actual = "@com_google_absl//absl/types:any",
    )
    native.bind(
        name = "abseil_base",
        actual = "@com_google_absl//absl/base:base",
    )

    # Bind for grpc.
    native.bind(
        name = "absl-base",
        actual = "@com_google_absl//absl/base",
    )
    native.bind(
        name = "abseil_flat_hash_map",
        actual = "@com_google_absl//absl/container:flat_hash_map",
    )
    native.bind(
        name = "abseil_flat_hash_set",
        actual = "@com_google_absl//absl/container:flat_hash_set",
    )
    native.bind(
        name = "abseil_hash",
        actual = "@com_google_absl//absl/hash:hash",
    )
    native.bind(
        name = "abseil_hash_testing",
        actual = "@com_google_absl//absl/hash:hash_testing",
    )
    native.bind(
        name = "abseil_inlined_vector",
        actual = "@com_google_absl//absl/container:inlined_vector",
    )
    native.bind(
        name = "abseil_memory",
        actual = "@com_google_absl//absl/memory:memory",
    )
    native.bind(
        name = "abseil_node_hash_map",
        actual = "@com_google_absl//absl/container:node_hash_map",
    )
    native.bind(
        name = "abseil_node_hash_set",
        actual = "@com_google_absl//absl/container:node_hash_set",
    )
    native.bind(
        name = "abseil_str_format",
        actual = "@com_google_absl//absl/strings:str_format",
    )
    native.bind(
        name = "abseil_strings",
        actual = "@com_google_absl//absl/strings:strings",
    )
    native.bind(
        name = "abseil_int128",
        actual = "@com_google_absl//absl/numeric:int128",
    )
    native.bind(
        name = "abseil_optional",
        actual = "@com_google_absl//absl/types:optional",
    )
    native.bind(
        name = "abseil_synchronization",
        actual = "@com_google_absl//absl/synchronization:synchronization",
    )
    native.bind(
        name = "abseil_symbolize",
        actual = "@com_google_absl//absl/debugging:symbolize",
    )
    native.bind(
        name = "abseil_stacktrace",
        actual = "@com_google_absl//absl/debugging:stacktrace",
    )

    # Require abseil_time as an indirect dependency as it is needed by the
    # direct dependency jwt_verify_lib.
    native.bind(
        name = "abseil_time",
        actual = "@com_google_absl//absl/time:time",
    )

    # Bind for grpc.
    native.bind(
        name = "absl-time",
        actual = "@com_google_absl//absl/time:time",
    )
    native.bind(
        name = "abseil_algorithm",
        actual = "@com_google_absl//absl/algorithm:algorithm",
    )
    native.bind(
        name = "abseil_variant",
        actual = "@com_google_absl//absl/types:variant",
    )
    native.bind(
        name = "abseil_status",
        actual = "@com_google_absl//absl/status",
    )

def _com_google_protobuf():
    _repository_impl("rules_python")
    _repository_impl(
        "com_google_protobuf",
        patches = ["@envoy//bazel:protobuf.patch"],
        patch_args = ["-p1"],
    )
    native.bind(
        name = "protobuf",
        actual = "@com_google_protobuf//:protobuf",
    )
    native.bind(
        name = "protobuf_clib",
        actual = "@com_google_protobuf//:protoc_lib",
    )
    native.bind(
        name = "protocol_compiler",
        actual = "@com_google_protobuf//:protoc",
    )
    native.bind(
        name = "protoc",
        actual = "@com_google_protobuf//:protoc",
    )

    # Needed for `bazel fetch` to work with @com_google_protobuf
    # https://github.com/google/protobuf/blob/v3.6.1/util/python/BUILD#L6-L9
    native.bind(
        name = "python_headers",
        actual = "@com_google_protobuf//util/python:python_headers",
    )

def _io_opencensus_cpp():
    location = _get_location("io_opencensus_cpp")
    http_archive(
        name = "io_opencensus_cpp",
        **location
    )
    native.bind(
        name = "opencensus_trace",
        actual = "@io_opencensus_cpp//opencensus/trace",
    )
    native.bind(
        name = "opencensus_trace_b3",
        actual = "@io_opencensus_cpp//opencensus/trace:b3",
    )
    native.bind(
        name = "opencensus_trace_cloud_trace_context",
        actual = "@io_opencensus_cpp//opencensus/trace:cloud_trace_context",
    )
    native.bind(
        name = "opencensus_trace_grpc_trace_bin",
        actual = "@io_opencensus_cpp//opencensus/trace:grpc_trace_bin",
    )
    native.bind(
        name = "opencensus_trace_trace_context",
        actual = "@io_opencensus_cpp//opencensus/trace:trace_context",
    )
    native.bind(
        name = "opencensus_exporter_ocagent",
        actual = "@io_opencensus_cpp//opencensus/exporters/trace/ocagent:ocagent_exporter",
    )
    native.bind(
        name = "opencensus_exporter_stdout",
        actual = "@io_opencensus_cpp//opencensus/exporters/trace/stdout:stdout_exporter",
    )
    native.bind(
        name = "opencensus_exporter_stackdriver",
        actual = "@io_opencensus_cpp//opencensus/exporters/trace/stackdriver:stackdriver_exporter",
    )
    native.bind(
        name = "opencensus_exporter_zipkin",
        actual = "@io_opencensus_cpp//opencensus/exporters/trace/zipkin:zipkin_exporter",
    )

def _com_github_curl():
    # Used by OpenCensus Zipkin exporter.
    location = _get_location("com_github_curl")
    http_archive(
        name = "com_github_curl",
        build_file_content = BUILD_ALL_CONTENT + """
cc_library(name = "curl", visibility = ["//visibility:public"], deps = ["@envoy//bazel/foreign_cc:curl"])
""",
        **location
    )
    native.bind(
        name = "curl",
        actual = "@envoy//bazel/foreign_cc:curl",
    )

def _com_googlesource_chromium_v8():
    location = _get_location("com_googlesource_chromium_v8")
    genrule_repository(
        name = "com_googlesource_chromium_v8",
        genrule_cmd_file = "@envoy//bazel/external:wee8.genrule_cmd",
        build_file = "@envoy//bazel/external:wee8.BUILD",
        patches = ["@envoy//bazel/external:wee8.patch"],
        **location
    )
    native.bind(
        name = "wee8",
        actual = "@com_googlesource_chromium_v8//:wee8",
    )

def _com_googlesource_quiche():
    location = REPOSITORY_LOCATIONS["com_googlesource_quiche"]
    genrule_repository(
        name = "com_googlesource_quiche",
        urls = location["urls"],
        sha256 = location["sha256"],
        genrule_cmd_file = "@envoy//bazel/external:quiche.genrule_cmd",
        build_file = "@envoy//bazel/external:quiche.BUILD",
    )
    native.bind(
        name = "quiche_common_platform",
        actual = "@com_googlesource_quiche//:quiche_common_platform",
    )
    native.bind(
        name = "quiche_http2_platform",
        actual = "@com_googlesource_quiche//:http2_platform",
    )
    native.bind(
        name = "quiche_spdy_platform",
        actual = "@com_googlesource_quiche//:spdy_platform",
    )
    native.bind(
        name = "quiche_quic_platform",
        actual = "@com_googlesource_quiche//:quic_platform",
    )
    native.bind(
        name = "quiche_quic_platform_base",
        actual = "@com_googlesource_quiche//:quic_platform_base",
    )

def _com_googlesource_googleurl():
    _repository_impl(
        name = "com_googlesource_googleurl",
    )
    native.bind(
        name = "googleurl",
        actual = "@com_googlesource_googleurl//url:url",
    )

def _org_llvm_releases_compiler_rt():
    _repository_impl(
        name = "org_llvm_releases_compiler_rt",
        build_file = "@envoy//bazel/external:compiler_rt.BUILD",
    )

def _com_github_grpc_grpc():
    _repository_impl("com_github_grpc_grpc")
    _repository_impl("build_bazel_rules_apple")

    # Rebind some stuff to match what the gRPC Bazel is expecting.
    native.bind(
        name = "protobuf_headers",
        actual = "@com_google_protobuf//:protobuf_headers",
    )
    native.bind(
        name = "libssl",
        actual = "//external:ssl",
    )
    native.bind(
        name = "cares",
        actual = "//external:ares",
    )
    native.bind(
        name = "grpc",
        actual = "@com_github_grpc_grpc//:grpc++",
    )
    native.bind(
        name = "grpc_health_proto",
        actual = "@envoy//bazel:grpc_health_proto",
    )
    native.bind(
        name = "grpc_alts_fake_handshaker_server",
        actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:fake_handshaker_lib",
    )
    native.bind(
        name = "grpc_alts_handshaker_proto",
        actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:handshaker_proto",
    )
    native.bind(
        name = "grpc_alts_transport_security_common_proto",
        actual = "@com_github_grpc_grpc//test/core/tsi/alts/fake_handshaker:transport_security_common_proto",
    )

def _upb():
    _repository_impl(
        name = "upb",
        patches = ["@envoy//bazel:upb.patch"],
        patch_args = ["-p1"],
    )
    native.bind(
        name = "upb_lib",
        actual = "@upb//:upb",
    )

def _proxy_wasm_cpp_sdk():
    _repository_impl(name = "proxy_wasm_cpp_sdk")

def _proxy_wasm_cpp_host():
    _repository_impl(
        name = "proxy_wasm_cpp_host",
        build_file = "@envoy//bazel/external:proxy_wasm_cpp_host.BUILD",
    )

def _emscripten_toolchain():
    _repository_impl(
        name = "emscripten_toolchain",
        build_file_content = _build_all_content(exclude = [
            "upstream/emscripten/cache/is_vanilla.txt",
            ".emscripten_sanity",
        ]),
        patch_cmds = REPOSITORY_LOCATIONS["emscripten_toolchain"]["patch_cmds"],
    )

def _com_github_google_jwt_verify():
    _repository_impl("com_github_google_jwt_verify")
    native.bind(
        name = "jwt_verify_lib",
        actual = "@com_github_google_jwt_verify//:jwt_verify_lib",
    )

def _com_github_luajit_luajit():
    location = _get_location("com_github_luajit_luajit")
    http_archive(
        name = "com_github_luajit_luajit",
        build_file_content = BUILD_ALL_CONTENT,
        patches = ["@envoy//bazel/foreign_cc:luajit.patch"],
        patch_args = ["-p1"],
        patch_cmds = ["chmod u+x build.py"],
        **location
    )
    native.bind(
        name = "luajit",
        actual = "@envoy//bazel/foreign_cc:luajit",
    )

def _com_github_moonjit_moonjit():
    location = _get_location("com_github_moonjit_moonjit")
    http_archive(
        name = "com_github_moonjit_moonjit",
        build_file_content = BUILD_ALL_CONTENT,
        patches = ["@envoy//bazel/foreign_cc:moonjit.patch"],
        patch_args = ["-p1"],
        patch_cmds = ["chmod u+x build.py"],
        **location
    )
    native.bind(
        name = "moonjit",
        actual = "@envoy//bazel/foreign_cc:moonjit",
    )

def _com_github_google_tcmalloc():
    _repository_impl(
        name = "com_github_google_tcmalloc",
    )
    native.bind(
        name = "tcmalloc",
        actual = "@com_github_google_tcmalloc//tcmalloc",
    )

def _com_github_gperftools_gperftools():
    location = _get_location("com_github_gperftools_gperftools")
    http_archive(
        name = "com_github_gperftools_gperftools",
        build_file_content = BUILD_ALL_CONTENT,
        **location
    )
    native.bind(
        name = "gperftools",
        actual = "@envoy//bazel/foreign_cc:gperftools",
    )

def _org_llvm_llvm():
    location = _get_location("org_llvm_llvm")
    http_archive(
        name = "org_llvm_llvm",
        build_file_content = BUILD_ALL_CONTENT,
        patch_args = ["-p1"],
        patches = ["@envoy//bazel/foreign_cc:llvm.patch"],
        **location
    )
    native.bind(
        name = "llvm",
        actual = "@envoy//bazel/foreign_cc:llvm",
    )

def _com_github_wavm_wavm():
    location = _get_location("com_github_wavm_wavm")
    http_archive(
        name = "com_github_wavm_wavm",
        build_file_content = BUILD_ALL_CONTENT,
        **location
    )
    native.bind(
        name = "wavm",
        actual = "@envoy//bazel/foreign_cc:wavm",
    )

def _kafka_deps():
    # This archive contains Kafka client source code.
    # We are using request/response message format files to generate parser code.
    KAFKASOURCE_BUILD_CONTENT = """
filegroup(
    name = "request_protocol_files",
    srcs = glob(["*Request.json"]),
    visibility = ["//visibility:public"],
)
filegroup(
    name = "response_protocol_files",
    srcs = glob(["*Response.json"]),
    visibility = ["//visibility:public"],
)
"""
    http_archive(
        name = "kafka_source",
        build_file_content = KAFKASOURCE_BUILD_CONTENT,
        patches = ["@envoy//bazel/external:kafka_int32.patch"],
        **_get_location("kafka_source")
    )

    # This archive provides Kafka (and Zookeeper) binaries, that are used during Kafka integration
    # tests.
    http_archive(
        name = "kafka_server_binary",
        build_file_content = BUILD_ALL_CONTENT,
        **_get_location("kafka_server_binary")
    )

    # This archive provides Kafka client in Python, so we can use it to interact with Kafka server
    # during integration tests.
    http_archive(
        name = "kafka_python_client",
        build_file_content = BUILD_ALL_CONTENT,
        **_get_location("kafka_python_client")
    )

def _foreign_cc_dependencies():
    _repository_impl("rules_foreign_cc")

def _is_linux(ctxt):
    return ctxt.os.name == "linux"

def _is_arch(ctxt, arch):
    res = ctxt.execute(["uname", "-m"])
    return arch in res.stdout

def _is_linux_ppc(ctxt):
    return _is_linux(ctxt) and _is_arch(ctxt, "ppc")

def _is_linux_s390x(ctxt):
    return _is_linux(ctxt) and _is_arch(ctxt, "s390x")

def _is_linux_x86_64(ctxt):
    return _is_linux(ctxt) and _is_arch(ctxt, "x86_64")



================================================
FILE: bazel/repositories_extra.bzl
================================================
load("@rules_python//python:repositories.bzl", "py_repositories")
load("@rules_python//python:pip.bzl", "pip3_import", "pip_repositories")

# Python dependencies.
def _python_deps():
    py_repositories()
    pip_repositories()
    pip3_import(
        name = "config_validation_pip3",
        requirements = "@envoy//tools/config_validation:requirements.txt",
        extra_pip_args = ["--require-hashes"],

        # project_name = "PyYAML",
        # project_url = "https://github.com/yaml/pyyaml",
        # version = "5.3.1",
        # last_update = "2020-03-18"
        # use_category = ["other"],
        # cpe = "cpe:2.3:a:pyyaml:pyyaml:*",
    )
    pip3_import(
        name = "configs_pip3",
        requirements = "@envoy//configs:requirements.txt",
        extra_pip_args = ["--require-hashes"],

        # project_name = "Jinja",
        # project_url = "http://palletsprojects.com/p/jinja",
        # version = "2.11.2",
        # last_update = "2020-04-13"
        # use_category = ["test"],
        # cpe = "cpe:2.3:a:palletsprojects:jinja:*",

        # project_name = "MarkupSafe",
        # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/",
        # version = "1.1.1",
        # last_update = "2019-02-23"
        # use_category = ["test"],
    )
    pip3_import(
        name = "kafka_pip3",
        requirements = "@envoy//source/extensions/filters/network/kafka:requirements.txt",
        extra_pip_args = ["--require-hashes"],

        # project_name = "Jinja",
        # project_url = "http://palletsprojects.com/p/jinja",
        # version = "2.11.2",
        # last_update = "2020-04-13"
        # use_category = ["test"],
        # cpe = "cpe:2.3:a:palletsprojects:jinja:*",

        # project_name = "MarkupSafe",
        # project_url = "https://markupsafe.palletsprojects.com/en/1.1.x/",
        # version = "1.1.1",
        # last_update = "2019-02-23"
        # use_category = ["test"],
    )
    pip3_import(
        name = "headersplit_pip3",
        requirements = "@envoy//tools/envoy_headersplit:requirements.txt",
        extra_pip_args = ["--require-hashes"],

        # project_name = "Clang",
        # project_url = "https://clang.llvm.org/",
        # version = "10.0.1",
        # last_update = "2020-07-21"
        # use_category = ["other"],
        # cpe = "cpe:2.3:a:llvm:clang:*",
    )
    pip3_import(
        name = "protodoc_pip3",
        requirements = "@envoy//tools/protodoc:requirements.txt",
        extra_pip_args = ["--require-hashes"],

        # project_name = "PyYAML",
        # project_url = "https://github.com/yaml/pyyaml",
        # version = "5.3.1",
        # last_update = "2020-03-18"
        # use_category = ["other"],
        # cpe = "cpe:2.3:a:pyyaml:pyyaml:*",
    )
    pip3_import(
        name = "thrift_pip3",
        requirements = "@envoy//test/extensions/filters/network/thrift_proxy:requirements.txt",
        extra_pip_args = ["--require-hashes"],

        # project_name = "Apache Thrift",
        # project_url = "http://thrift.apache.org/",
        # version = "0.11.0",
        # last_update = "2017-12-07"
        # use_category = ["dataplane"],
        # cpe = "cpe:2.3:a:apache:thrift:*",

        # project_name = "Six: Python 2 and 3 Compatibility Library",
        # project_url = "https://six.readthedocs.io/",
        # version = "1.15.0",
        # last_update = "2020-05-21"
        # use_category = ["dataplane"],
    )

# Envoy deps that rely on a first stage of dependency loading in envoy_dependencies().
def envoy_dependencies_extra():
    _python_deps()



================================================
FILE: bazel/repository_locations.bzl
================================================
# Validation of content in this file is done on the bazel/repositories.bzl file to make it free of bazel
# constructs. This is to allow this file to be loaded into Python based build and maintenance tools.

# Envoy dependencies may be annotated with the following attributes:
DEPENDENCY_ANNOTATIONS = [
    # List of the categories describing how the dependency is being used. This attribute is used
    # for automatic tracking of security posture of Envoy's dependencies.
    # Possible values are documented in the USE_CATEGORIES list below.
    # This attribute is mandatory for each dependency.
    "use_category",

    # Attribute specifying CPE (Common Platform Enumeration, see https://nvd.nist.gov/products/cpe) ID
    # of the dependency. The ID may be in v2.3 or v2.2 format, although v2.3 is preferred. See
    # https://nvd.nist.gov/products/cpe for CPE format. Use single wildcard '*' for version and vector elements
    # i.e. 'cpe:2.3:a:nghttp2:nghttp2:*'. Use "N/A" for dependencies without CPE assigned.
    # This attribute is optional for components with use categories listed in the
    # USE_CATEGORIES_WITH_CPE_OPTIONAL
    "cpe",
]

# NOTE: If a dependency use case is either dataplane or controlplane, the other uses are not needed
# to be declared.
USE_CATEGORIES = [
    # This dependency is used in API protos.
    "api",
    # This dependency is used in build process.
    "build",
    # This dependency is used to process xDS requests.
    "controlplane",
    # This dependency is used in processing downstream or upstream requests (core).
    "dataplane_core",
    # This dependency is used in processing downstream or upstream requests (extensions).
    "dataplane_ext",
    # This dependency is used for logging, metrics or tracing (core). It may process untrusted input.
    "observability_core",
    # This dependency is used for logging, metrics or tracing (extensions). It may process untrusted input.
    "observability_ext",
    # This dependency does not handle untrusted data and is used for various utility purposes.
    "other",
    # This dependency is used only in tests.
    "test_only",
]

# Components with these use categories are not required to specify the 'cpe'
# and 'last_updated' annotation.
USE_CATEGORIES_WITH_CPE_OPTIONAL = ["build", "other", "test_only"] DEPENDENCY_REPOSITORIES_SPEC = dict( bazel_compdb = dict( project_name = "bazel-compilation-database", project_desc = "Clang JSON compilation database support for Bazel", project_url = "https://github.com/grailbio/bazel-compilation-database", version = "0.4.5", sha256 = "bcecfd622c4ef272fd4ba42726a52e140b961c4eac23025f18b346c968a8cfb4", strip_prefix = "bazel-compilation-database-{version}", urls = ["https://github.com/grailbio/bazel-compilation-database/archive/{version}.tar.gz"], last_updated = "2020-08-01", use_category = ["build"], ), bazel_gazelle = dict( project_name = "Gazelle", project_desc = "Bazel BUILD file generator for Go projects", project_url = "https://github.com/bazelbuild/bazel-gazelle", version = "0.21.1", sha256 = "cdb02a887a7187ea4d5a27452311a75ed8637379a1287d8eeb952138ea485f7d", urls = ["https://github.com/bazelbuild/bazel-gazelle/releases/download/v{version}/bazel-gazelle-v{version}.tar.gz"], last_updated = "2020-05-28", use_category = ["build"], ), bazel_toolchains = dict( project_name = "bazel-toolchains", project_desc = "Bazel toolchain configs for RBE", project_url = "https://github.com/bazelbuild/bazel-toolchains", version = "3.4.1", sha256 = "7ebb200ed3ca3d1f7505659c7dfed01c4b5cb04c3a6f34140726fe22f5d35e86", strip_prefix = "bazel-toolchains-{version}", urls = [ "https://github.com/bazelbuild/bazel-toolchains/releases/download/{version}/bazel-toolchains-{version}.tar.gz", "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/{version}.tar.gz", ], last_updated = "2020-08-10", use_category = ["build"], ), build_bazel_rules_apple = dict( project_name = "Apple Rules for Bazel", project_desc = "Bazel rules for Apple platforms", project_url = "https://github.com/bazelbuild/rules_apple", version = "0.19.0", sha256 = "7a7afdd4869bb201c9352eed2daf37294d42b093579b70423490c1b4d4f6ce42", urls = 
["https://github.com/bazelbuild/rules_apple/releases/download/{version}/rules_apple.{version}.tar.gz"], last_updated = "2020-10-10", use_category = ["build"], ), envoy_build_tools = dict( project_name = "envoy-build-tools", project_desc = "Common build tools shared by the Envoy/UDPA ecosystem", project_url = "https://github.com/envoyproxy/envoy-build-tools", version = "0ba5aa98a6e6c5efcc63f53602f69548d2417683", sha256 = "dc3881d16e7b0c855a7279f5757d55e4aa55fe2befbd9e34215b971818622f9e", strip_prefix = "envoy-build-tools-{version}", urls = ["https://github.com/envoyproxy/envoy-build-tools/archive/{version}.tar.gz"], last_updated = "2020-10-01", use_category = ["build"], ), boringssl = dict( project_name = "BoringSSL", project_desc = "Minimal OpenSSL fork", project_url = "https://github.com/google/boringssl", version = "597b810379e126ae05d32c1d94b1a9464385acd0", sha256 = "1ea42456c020daf0a9b0f9e8d8bc3a403c9314f4f54230c617257af996cd5fa6", strip_prefix = "boringssl-{version}", # To update BoringSSL, which tracks Chromium releases: # 1. Open https://omahaproxy.appspot.com/ and note of linux/stable release. # 2. Open https://chromium.googlesource.com/chromium/src/+/refs/tags//DEPS and note . # 3. Find a commit in BoringSSL's "master-with-bazel" branch that merges . 
# # chromium-85.0.4183.83 urls = ["https://github.com/google/boringssl/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], last_updated = "2020-06-23", cpe = "cpe:2.3:a:google:boringssl:*", ), boringssl_fips = dict( project_name = "BoringSSL (FIPS)", project_desc = "FIPS compliant BoringSSL", project_url = "https://boringssl.googlesource.com/boringssl/+/master/crypto/fipsmodule/FIPS.md", version = "fips-20190808", sha256 = "3b5fdf23274d4179c2077b5e8fa625d9debd7a390aac1d165b7e47234f648bb8", urls = ["https://commondatastorage.googleapis.com/chromium-boringssl-fips/boringssl-ae223d6138807a13006342edfeef32e813246b39.tar.xz"], use_category = ["controlplane", "dataplane_core"], last_updated = "2019-08-08", cpe = "cpe:2.3:a:google:boringssl:*", ), com_google_absl = dict( project_name = "Abseil", project_desc = "Open source collection of C++ libraries drawn from the most fundamental pieces of Google’s internal codebase", project_url = "https://abseil.io/", version = "093cc27604df1c4a179b73bc3f00d4d1ce2ce113", sha256 = "55d33c75aff05a8c4a55bdf0eddad66c71a963107bc2add96cf8eb88ddb47a80", strip_prefix = "abseil-cpp-{version}", urls = ["https://github.com/abseil/abseil-cpp/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], last_updated = "2020-10-01", cpe = "N/A", ), com_github_c_ares_c_ares = dict( project_name = "c-ares", project_desc = "C library for asynchronous DNS requests", project_url = "https://c-ares.haxx.se/", version = "1.16.1", sha256 = "d08312d0ecc3bd48eee0a4cc0d2137c9f194e0a28de2028928c0f6cae85f86ce", strip_prefix = "c-ares-{version}", urls = ["https://github.com/c-ares/c-ares/releases/download/cares-{underscore_version}/c-ares-{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], last_updated = "2020-05-11", cpe = "cpe:2.3:a:c-ares_project:c-ares:*", ), com_github_circonus_labs_libcircllhist = dict( project_name = "libcircllhist", project_desc = "An implementation of Circonus log-linear 
histograms", project_url = "https://github.com/circonus-labs/libcircllhist", version = "63a16dd6f2fc7bc841bb17ff92be8318df60e2e1", sha256 = "8165aa25e529d7d4b9ae849d3bf30371255a99d6db0421516abcff23214cdc2c", strip_prefix = "libcircllhist-{version}", urls = ["https://github.com/circonus-labs/libcircllhist/archive/{version}.tar.gz"], use_category = ["controlplane", "observability_core", "dataplane_core"], last_updated = "2019-02-11", cpe = "N/A", ), com_github_cyan4973_xxhash = dict( project_name = "xxHash", project_desc = "Extremely fast hash algorithm", project_url = "https://github.com/Cyan4973/xxHash", version = "0.7.3", sha256 = "952ebbf5b11fbf59ae5d760a562d1e9112278f244340ad7714e8556cbe54f7f7", strip_prefix = "xxHash-{version}", urls = ["https://github.com/Cyan4973/xxHash/archive/v{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], last_updated = "2020-03-04", cpe = "N/A", ), com_github_envoyproxy_sqlparser = dict( project_name = "C++ SQL Parser Library", project_desc = "Forked from Hyrise SQL Parser", project_url = "https://github.com/envoyproxy/sql-parser", version = "3b40ba2d106587bdf053a292f7e3bb17e818a57f", sha256 = "96c10c8e950a141a32034f19b19cdeb1da48fe859cf96ae5e19f894f36c62c71", strip_prefix = "sql-parser-{version}", urls = ["https://github.com/envoyproxy/sql-parser/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.filters.network.mysql_proxy", "envoy.filters.network.postgres_proxy", ], last_updated = "2020-06-10", cpe = "N/A", ), com_github_mirror_tclap = dict( project_name = "tclap", project_desc = "Small, flexible library that provides a simple interface for defining and accessing command line arguments", project_url = "http://tclap.sourceforge.net", version = "1-2-1", sha256 = "f0ede0721dddbb5eba3a47385a6e8681b14f155e1129dd39d1a959411935098f", strip_prefix = "tclap-tclap-{version}-release-final", urls = ["https://github.com/mirror/tclap/archive/tclap-{version}-release-final.tar.gz"], last_updated 
= "2017-11-10", use_category = ["other"], ), com_github_fmtlib_fmt = dict( project_name = "fmt", project_desc = "{fmt} is an open-source formatting library providing a fast and safe alternative to C stdio and C++ iostreams", project_url = "https://fmt.dev", version = "7.0.3", sha256 = "decfdf9ad274070fa85f26407b816f5a4d82205ae86bac1990be658d0795ea4d", strip_prefix = "fmt-{version}", urls = ["https://github.com/fmtlib/fmt/releases/download/{version}/fmt-{version}.zip"], use_category = ["dataplane_core", "controlplane"], last_updated = "2020-08-07", cpe = "cpe:2.3:a:fmt:fmt:*", ), com_github_gabime_spdlog = dict( project_name = "spdlog", project_desc = "Very fast, header-only/compiled, C++ logging library", project_url = "https://github.com/gabime/spdlog", version = "1.7.0", sha256 = "f0114a4d3c88be9e696762f37a7c379619443ce9d668546c61b21d41affe5b62", strip_prefix = "spdlog-{version}", urls = ["https://github.com/gabime/spdlog/archive/v{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], last_updated = "2020-07-09", cpe = "N/A", ), com_github_google_libprotobuf_mutator = dict( project_name = "libprotobuf-mutator", project_desc = "Library to randomly mutate protobuffers", project_url = "https://github.com/google/libprotobuf-mutator", version = "8942a9ba43d8bb196230c321d46d6a137957a719", sha256 = "49a26dbe77c75f2eca1dd8a9fbdb31c4496d9af42df027ff57569c5a7a5d980d", strip_prefix = "libprotobuf-mutator-{version}", urls = ["https://github.com/google/libprotobuf-mutator/archive/{version}.tar.gz"], last_updated = "2020-08-18", use_category = ["test_only"], ), com_github_google_tcmalloc = dict( project_name = "tcmalloc", project_desc = "Fast, multi-threaded malloc implementation", project_url = "https://github.com/google/tcmalloc", version = "d1311bf409db47c3441d3de6ea07d768c6551dec", sha256 = "e22444b6544edd81f11c987dd5e482a2e00bbff717badb388779ca57525dad50", strip_prefix = "tcmalloc-{version}", urls = 
["https://github.com/google/tcmalloc/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], last_updated = "2020-09-16", cpe = "N/A", ), com_github_gperftools_gperftools = dict( project_name = "gperftools", project_desc = "tcmalloc and profiling libraries", project_url = "https://github.com/gperftools/gperftools", version = "2.8", sha256 = "240deacdd628b6459671b83eb0c4db8e97baadf659f25b92e9a078d536bd513e", strip_prefix = "gperftools-{version}", urls = ["https://github.com/gperftools/gperftools/releases/download/gperftools-{version}/gperftools-{version}.tar.gz"], last_updated = "2020-07-06", use_category = ["dataplane_core", "controlplane"], cpe = "cpe:2.3:a:gperftools_project:gperftools:*", ), com_github_grpc_grpc = dict( project_name = "gRPC", project_desc = "gRPC C core library", project_url = "https://grpc.io", # TODO(JimmyCYJ): Bump to release 1.27 # This sha on grpc:v1.25.x branch is specifically chosen to fix gRPC STS call credential options. version = "d8f4928fa779f6005a7fe55a176bdb373b0f910f", sha256 = "bbc8f020f4e85ec029b047fab939b8c81f3d67254b5c724e1003a2bc49ddd123", strip_prefix = "grpc-{version}", urls = ["https://github.com/grpc/grpc/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], last_updated = "2020-02-11", cpe = "cpe:2.3:a:grpc:grpc:*", ), com_github_luajit_luajit = dict( project_name = "LuaJIT", project_desc = "Just-In-Time compiler for Lua", project_url = "https://luajit.org", version = "2.1.0-beta3", sha256 = "409f7fe570d3c16558e594421c47bdd130238323c9d6fd6c83dedd2aaeb082a8", strip_prefix = "LuaJIT-{version}", urls = ["https://github.com/LuaJIT/LuaJIT/archive/v{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.lua"], last_updated = "2017-11-07", cpe = "cpe:2.3:a:luajit:luajit:*", ), com_github_moonjit_moonjit = dict( project_name = "Moonjit", project_desc = "LuaJIT fork with wider platform support", project_url = "https://github.com/moonjit/moonjit", 
version = "2.2.0", sha256 = "83deb2c880488dfe7dd8ebf09e3b1e7613ef4b8420de53de6f712f01aabca2b6", strip_prefix = "moonjit-{version}", urls = ["https://github.com/moonjit/moonjit/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.lua"], last_updated = "2020-01-14", cpe = "cpe:2.3:a:moonjit_project:moonjit:*", ), com_github_nghttp2_nghttp2 = dict( project_name = "Nghttp2", project_desc = "Implementation of HTTP/2 and its header compression algorithm HPACK in Cimplementation of HTTP/2 and its header compression algorithm HPACK in C", project_url = "https://nghttp2.org", version = "1.41.0", sha256 = "eacc6f0f8543583ecd659faf0a3f906ed03826f1d4157b536b4b385fe47c5bb8", strip_prefix = "nghttp2-{version}", urls = ["https://github.com/nghttp2/nghttp2/releases/download/v{version}/nghttp2-{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], last_updated = "2020-06-02", cpe = "cpe:2.3:a:nghttp2:nghttp2:*", ), io_opentracing_cpp = dict( project_name = "OpenTracing", project_desc = "Vendor-neutral APIs and instrumentation for distributed tracing", project_url = "https://opentracing.io", version = "1.5.1", sha256 = "015c4187f7a6426a2b5196f0ccd982aa87f010cf61f507ae3ce5c90523f92301", strip_prefix = "opentracing-cpp-{version}", urls = ["https://github.com/opentracing/opentracing-cpp/archive/v{version}.tar.gz"], use_category = ["observability_ext"], extensions = [ "envoy.tracers.datadog", "envoy.tracers.dynamic_ot", "envoy.tracers.lightstep", ], last_updated = "2019-01-16", cpe = "N/A", ), com_lightstep_tracer_cpp = dict( project_name = "lightstep-tracer-cpp", project_desc = "LightStep distributed tracing library for C++", project_url = "https://github.com/lightstep/lightstep-tracer-cpp", version = "1942b3f142e218ebc143a043f32e3278dafec9aa", sha256 = "3238921a8f578beb26c2215cd277e8f6752f3d29b020b881d60d96a240a38aed", strip_prefix = "lightstep-tracer-cpp-{version}", urls = 
["https://github.com/lightstep/lightstep-tracer-cpp/archive/{version}.tar.gz"], use_category = ["observability_ext"], extensions = ["envoy.tracers.lightstep"], last_updated = "2020-08-24", cpe = "N/A", ), com_github_datadog_dd_opentracing_cpp = dict( project_name = "Datadog OpenTracing C++ Client", project_desc = "Datadog OpenTracing C++ Client", project_url = "https://github.com/DataDog/dd-opentracing-cpp", version = "1.1.5", sha256 = "b84fd2fb0bb0578af4901db31d1c0ae909b532a1016fe6534cbe31a6c3ad6924", strip_prefix = "dd-opentracing-cpp-{version}", urls = ["https://github.com/DataDog/dd-opentracing-cpp/archive/v{version}.tar.gz"], use_category = ["observability_ext"], extensions = ["envoy.tracers.datadog"], last_updated = "2020-05-15", cpe = "N/A", ), com_github_google_benchmark = dict( project_name = "Benchmark", project_desc = "Library to benchmark code snippets", project_url = "https://github.com/google/benchmark", version = "1.5.1", sha256 = "23082937d1663a53b90cb5b61df4bcc312f6dee7018da78ba00dd6bd669dfef2", strip_prefix = "benchmark-{version}", urls = ["https://github.com/google/benchmark/archive/v{version}.tar.gz"], use_category = ["test_only"], last_updated = "2020-06-09", ), com_github_libevent_libevent = dict( project_name = "libevent", project_desc = "Event notification library", project_url = "https://libevent.org", # This SHA includes the new "prepare" and "check" watchers, used for event loop performance # stats (see https://github.com/libevent/libevent/pull/793) and the fix for a race condition # in the watchers (see https://github.com/libevent/libevent/pull/802). # This also includes the fixes for https://github.com/libevent/libevent/issues/806 # and https://github.com/lyft/envoy-mobile/issues/215. # This also includes the fixes for Phantom events with EV_ET (see # https://github.com/libevent/libevent/issues/984). 
# This also includes the wepoll backend for Windows (see # https://github.com/libevent/libevent/pull/1006) # TODO(adip): Update to v2.2 when it is released. version = "62c152d9a7cd264b993dad730c4163c6ede2e0a3", sha256 = "4c80e5fe044ce5f8055b20a2f141ee32ec2614000f3e95d2aa81611a4c8f5213", strip_prefix = "libevent-{version}", urls = ["https://github.com/libevent/libevent/archive/{version}.tar.gz"], use_category = ["dataplane_core", "controlplane"], last_updated = "2020-07-31", cpe = "cpe:2.3:a:libevent_project:libevent:*", ), # This should be removed, see https://github.com/envoyproxy/envoy/issues/13261. net_zlib = dict( project_name = "zlib", project_desc = "zlib compression library", project_url = "https://zlib.net", version = "79baebe50e4d6b73ae1f8b603f0ef41300110aa3", # Use the dev branch of zlib to resolve fuzz bugs and out of bound # errors resulting in crashes in zlib 1.2.11. # TODO(asraa): Remove when zlib > 1.2.11 is released. sha256 = "155a8f8c1a753fb05b16a1b0cc0a0a9f61a78e245f9e0da483d13043b3bcbf2e", strip_prefix = "zlib-{version}", urls = ["https://github.com/madler/zlib/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], last_updated = "2019-04-14", cpe = "cpe:2.3:a:gnu:zlib:*", ), com_github_zlib_ng_zlib_ng = dict( project_name = "zlib-ng", project_desc = "zlib fork (higher performance)", project_url = "https://github.com/zlib-ng/zlib-ng", version = "193d8fd7dfb7927facab7a3034daa27ad5b9df1c", sha256 = "5fe543e8d007b9e7b729f3d6b3a5ee1f9b68d0eef5f6af1393745a4dcd472a98", strip_prefix = "zlib-ng-{version}", urls = ["https://github.com/zlib-ng/zlib-ng/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], last_updated = "2020-08-16", cpe = "N/A", ), com_github_jbeder_yaml_cpp = dict( project_name = "yaml-cpp", project_desc = "YAML parser and emitter in C++ matching the YAML 1.2 spec", project_url = "https://github.com/jbeder/yaml-cpp", version = "98acc5a8874faab28b82c28936f4b400b389f5d6", sha256 = 
"79ab7069ef1c7c3632e7ffe095f7185d4c77b64d8035db3c085c239d4fe96d5f", strip_prefix = "yaml-cpp-{version}", urls = ["https://github.com/jbeder/yaml-cpp/archive/{version}.tar.gz"], # YAML is also used for runtime as well as controlplane. It shouldn't appear on the # dataplane but we can't verify this automatically due to code structure today. use_category = ["controlplane", "dataplane_core"], last_updated = "2020-07-28", cpe = "cpe:2.3:a:yaml-cpp_project:yaml-cpp:*", ), com_github_msgpack_msgpack_c = dict( project_name = "msgpack for C/C++", project_desc = "MessagePack is an efficient binary serialization format", project_url = "https://github.com/msgpack/msgpack-c", version = "3.3.0", sha256 = "6e114d12a5ddb8cb11f669f83f32246e484a8addd0ce93f274996f1941c1f07b", strip_prefix = "msgpack-{version}", urls = ["https://github.com/msgpack/msgpack-c/releases/download/cpp-{version}/msgpack-{version}.tar.gz"], use_category = ["observability_ext"], extensions = ["envoy.tracers.datadog"], last_updated = "2020-06-05", cpe = "N/A", ), com_github_google_jwt_verify = dict( project_name = "jwt_verify_lib", project_desc = "JWT verification library for C++", project_url = "https://github.com/google/jwt_verify_lib", version = "7276a339af8426724b744216f619c99152f8c141", sha256 = "f1fde4f3ebb3b2d841332c7a02a4b50e0529a19709934c63bc6208d1bbe28fb1", strip_prefix = "jwt_verify_lib-{version}", urls = ["https://github.com/google/jwt_verify_lib/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.jwt_authn"], last_updated = "2020-07-09", cpe = "N/A", ), com_github_nodejs_http_parser = dict( project_name = "HTTP Parser", project_desc = "Parser for HTTP messages written in C", project_url = "https://github.com/nodejs/http-parser", # This SHA includes fix for https://github.com/nodejs/http-parser/issues/517 which allows (opt-in) to serve # requests with both Content-Legth and Transfer-Encoding: chunked headers set. 
version = "4f15b7d510dc7c6361a26a7c6d2f7c3a17f8d878", sha256 = "6a12896313ce1ca630cf516a0ee43a79b5f13f5a5d8143f56560ac0b21c98fac", strip_prefix = "http-parser-{version}", urls = ["https://github.com/nodejs/http-parser/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], last_updated = "2020-07-10", cpe = "cpe:2.3:a:nodejs:node.js:*", ), com_github_tencent_rapidjson = dict( project_name = "RapidJSON", project_desc = "Fast JSON parser/generator for C++", project_url = "https://rapidjson.org", version = "dfbe1db9da455552f7a9ad5d2aea17dd9d832ac1", sha256 = "a2faafbc402394df0fa94602df4b5e4befd734aad6bb55dfef46f62fcaf1090b", strip_prefix = "rapidjson-{version}", urls = ["https://github.com/Tencent/rapidjson/archive/{version}.tar.gz"], # We're mostly using com_google_protobuf for JSON, but there are some extensions and hard to # disentangle uses on the dataplane, e.g. header_formatter, Squash filter. use_category = ["controlplane", "dataplane_core"], last_updated = "2019-12-02", cpe = "cpe:2.3:a:tencent:rapidjson:*", ), com_github_twitter_common_lang = dict( project_name = "twitter.common.lang (Thrift)", project_desc = "twitter.common Python language and compatibility facilities", project_url = "https://pypi.org/project/twitter.common.lang", version = "0.3.9", sha256 = "56d1d266fd4767941d11c27061a57bc1266a3342e551bde3780f9e9eb5ad0ed1", strip_prefix = "twitter.common.lang-{version}/src", urls = ["https://files.pythonhosted.org/packages/08/bc/d6409a813a9dccd4920a6262eb6e5889e90381453a5f58938ba4cf1d9420/twitter.common.lang-{version}.tar.gz"], last_updated = "2018-06-26", use_category = ["test_only"], ), com_github_twitter_common_rpc = dict( project_name = "twitter.common.rpc (Thrift)", project_desc = "twitter.common Thrift helpers including Finagle and SSL transports", project_url = "https://pypi.org/project/twitter.common.rpc", version = "0.3.9", sha256 = "0792b63fb2fb32d970c2e9a409d3d00633190a22eb185145fe3d9067fdaa4514", strip_prefix = 
"twitter.common.rpc-{version}/src", urls = ["https://files.pythonhosted.org/packages/be/97/f5f701b703d0f25fbf148992cd58d55b4d08d3db785aad209255ee67e2d0/twitter.common.rpc-{version}.tar.gz"], last_updated = "2018-06-26", use_category = ["test_only"], ), com_github_twitter_common_finagle_thrift = dict( project_name = "twitter.common.finagle-thrift", project_desc = "twitter.common Thrift stubs for Zipkin RPC tracing support in Finagle", project_url = "https://pypi.org/project/twitter.common.finagle-thrift", version = "0.3.9", sha256 = "1e3a57d11f94f58745e6b83348ecd4fa74194618704f45444a15bc391fde497a", strip_prefix = "twitter.common.finagle-thrift-{version}/src", urls = ["https://files.pythonhosted.org/packages/f9/e7/4f80d582578f8489226370762d2cf6bc9381175d1929eba1754e03f70708/twitter.common.finagle-thrift-{version}.tar.gz"], last_updated = "2018-06-26", use_category = ["test_only"], ), com_google_googletest = dict( project_name = "Google Test", project_desc = "Google's C++ test framework", project_url = "https://github.com/google/googletest", # Pick up fix for MOCK_METHOD compilation with clang-cl for Windows (resolved after 1.10.0) # see https://github.com/google/googletest/issues/2490 version = "a4ab0abb93620ce26efad9de9296b73b16e88588", sha256 = "7897bfaa5ad39a479177cfb5c3ce010184dbaee22a7c3727b212282871918751", strip_prefix = "googletest-{version}", urls = ["https://github.com/google/googletest/archive/{version}.tar.gz"], last_updated = "2020-09-10", use_category = ["test_only"], ), com_google_protobuf = dict( project_name = "Protocol Buffers", project_desc = "Language-neutral, platform-neutral extensible mechanism for serializing structured data", project_url = "https://developers.google.com/protocol-buffers", version = "3.10.1", sha256 = "d7cfd31620a352b2ee8c1ed883222a0d77e44346643458e062e86b1d069ace3e", strip_prefix = "protobuf-{version}", urls = ["https://github.com/protocolbuffers/protobuf/releases/download/v{version}/protobuf-all-{version}.tar.gz"], 
use_category = ["dataplane_core", "controlplane"], last_updated = "2020-10-24", cpe = "cpe:2.3:a:google:protobuf:*", ), grpc_httpjson_transcoding = dict( project_name = "grpc-httpjson-transcoding", project_desc = "Library that supports transcoding so that HTTP/JSON can be converted to gRPC", project_url = "https://github.com/grpc-ecosystem/grpc-httpjson-transcoding", version = "faf8af1e9788cd4385b94c8f85edab5ea5d4b2d6", sha256 = "62c8cb5ea2cca1142cde9d4a0778c52c6022345c3268c60ef81666946b958ad5", strip_prefix = "grpc-httpjson-transcoding-{version}", urls = ["https://github.com/grpc-ecosystem/grpc-httpjson-transcoding/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.http.grpc_json_transcoder"], last_updated = "2020-03-02", cpe = "N/A", ), io_bazel_rules_go = dict( project_name = "Go rules for Bazel", project_desc = "Bazel rules for the Go language", project_url = "https://github.com/bazelbuild/rules_go", version = "0.23.7", sha256 = "0310e837aed522875791750de44408ec91046c630374990edd51827cb169f616", urls = ["https://github.com/bazelbuild/rules_go/releases/download/v{version}/rules_go-v{version}.tar.gz"], use_category = ["build"], last_updated = "2020-08-06", ), rules_cc = dict( project_name = "C++ rules for Bazel", project_desc = "Bazel rules for the C++ language", project_url = "https://github.com/bazelbuild/rules_cc", # TODO(lizan): pin to a point releases when there's a released version. 
version = "818289e5613731ae410efb54218a4077fb9dbb03", sha256 = "9d48151ea71b3e225adfb6867e6d2c7d0dce46cbdc8710d9a9a628574dfd40a0", strip_prefix = "rules_cc-{version}", urls = ["https://github.com/bazelbuild/rules_cc/archive/{version}.tar.gz"], last_updated = "2020-05-13", use_category = ["build"], ), rules_foreign_cc = dict( project_name = "Rules for using foreign build systems in Bazel", project_desc = "Rules for using foreign build systems in Bazel", project_url = "https://github.com/bazelbuild/rules_foreign_cc", version = "594bf4d7731e606a705f3ad787dd0a70c5a28b30", sha256 = "2b1cf88de0b6e0195f6571cfde3a5bd406d11b42117d6adef2395c9525a1902e", strip_prefix = "rules_foreign_cc-{version}", urls = ["https://github.com/bazelbuild/rules_foreign_cc/archive/{version}.tar.gz"], last_updated = "2020-08-21", use_category = ["build"], ), rules_python = dict( project_name = "Python rules for Bazel", project_desc = "Bazel rules for the Python language", project_url = "https://github.com/bazelbuild/rules_python", # TODO(htuch): revert back to a point releases when pip3_import appears. 
version = "a0fbf98d4e3a232144df4d0d80b577c7a693b570", sha256 = "76a8fd4e7eca2a3590f816958faa0d83c9b2ce9c32634c5c375bcccf161d3bb5", strip_prefix = "rules_python-{version}", urls = ["https://github.com/bazelbuild/rules_python/archive/{version}.tar.gz"], last_updated = "2020-04-09", use_category = ["build"], ), six = dict( project_name = "Six", project_desc = "Python 2 and 3 compatibility library", project_url = "https://pypi.org/project/six", version = "1.12.0", sha256 = "d16a0141ec1a18405cd4ce8b4613101da75da0e9a7aec5bdd4fa804d0e0eba73", urls = ["https://files.pythonhosted.org/packages/dd/bf/4138e7bfb757de47d1f4b6994648ec67a51efe58fa907c1e11e350cddfca/six-{version}.tar.gz"], last_updated = "2019-11-17", use_category = ["other"], ), org_llvm_llvm = dict( project_name = "LLVM", project_desc = "LLVM Compiler Infrastructure", project_url = "https://llvm.org", version = "10.0", sha256 = "df83a44b3a9a71029049ec101fb0077ecbbdf5fe41e395215025779099a98fdf", strip_prefix = "llvm-{version}.0.src", urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}.0/llvm-{version}.0.src.tar.xz"], last_updated = "2020-03-24", use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.wasm", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], cpe = "N/A", ), com_github_wavm_wavm = dict( project_name = "WAVM", project_desc = "WebAssembly Virtual Machine", project_url = "https://wavm.github.io", version = "e8155f1f3af88b4d08802716a7054950ef18d827", sha256 = "cc3fcaf05d57010c9cf8eb920234679dede6c780137b55001fd34e4d14806f7c", strip_prefix = "WAVM-{version}", urls = ["https://github.com/WAVM/WAVM/archive/{version}.tar.gz"], last_updated = "2020-07-06", use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.wasm", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], cpe = "N/A", ), io_opencensus_cpp = dict( project_name = "OpenCensus 
C++", project_desc = "OpenCensus tracing library", project_url = "https://github.com/census-instrumentation/opencensus-cpp", version = "7877337633466358ed680f9b26967da5b310d7aa", sha256 = "12ff300fa804f97bd07e2ff071d969e09d5f3d7bbffeac438c725fa52a51a212", strip_prefix = "opencensus-cpp-{version}", urls = ["https://github.com/census-instrumentation/opencensus-cpp/archive/{version}.tar.gz"], use_category = ["observability_ext"], extensions = ["envoy.tracers.opencensus"], last_updated = "2020-06-01", cpe = "N/A", ), # This should be removed, see https://github.com/envoyproxy/envoy/issues/11816. com_github_curl = dict( project_name = "curl", project_desc = "Library for transferring data with URLs", project_url = "https://curl.haxx.se", version = "7.72.0", sha256 = "d4d5899a3868fbb6ae1856c3e55a32ce35913de3956d1973caccd37bd0174fa2", strip_prefix = "curl-{version}", urls = ["https://github.com/curl/curl/releases/download/curl-{underscore_version}/curl-{version}.tar.gz"], use_category = ["dataplane_ext", "observability_ext"], extensions = [ "envoy.filters.http.aws_lambda", "envoy.filters.http.aws_request_signing", "envoy.grpc_credentials.aws_iam", "envoy.tracers.opencensus", ], last_updated = "2020-08-19", cpe = "cpe:2.3:a:haxx:curl:*", ), com_googlesource_chromium_v8 = dict( project_name = "V8", project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++", project_url = "https://v8.dev", version = "8.5.210.20", # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh # and contains complete checkout of V8 with all dependencies necessary to build wee8. 
sha256 = "ef404643d7da6854b76b9fb9950a79a1acbd037b7a26f02c585ac379b0f7dee1", urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.wasm", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], last_updated = "2020-08-31", cpe = "cpe:2.3:a:google:v8:*", ), com_googlesource_quiche = dict( project_name = "QUICHE", project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", project_url = "https://quiche.googlesource.com/quiche", # Static snapshot of https://quiche.googlesource.com/quiche/+archive/f555d99a084cdd086a349548c70fb558ac5847cf.tar.gz version = "f555d99a084cdd086a349548c70fb558ac5847cf", sha256 = "1833f08e7b0f18b49d7498b029b7f3e6559a82113ec82a98a9e945553756e351", urls = ["https://storage.googleapis.com/quiche-envoy-integration/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = ["envoy.transport_sockets.quic"], last_updated = "2020-09-18", cpe = "N/A", ), com_googlesource_googleurl = dict( project_name = "Chrome URL parsing library", project_desc = "Chrome URL parsing library", project_url = "https://quiche.googlesource.com/googleurl", # Static snapshot of https://quiche.googlesource.com/quiche/+archive/ef0d23689e240e6c8de4c3a5296b209128c87373.tar.gz. 
version = "ef0d23689e240e6c8de4c3a5296b209128c87373", sha256 = "d769283fed1319bca68bae8bdd47fbc3a7933999329eee850eff1f1ea61ce176", urls = ["https://storage.googleapis.com/quiche-envoy-integration/googleurl_{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [], last_updated = "2020-08-05", cpe = "N/A", ), com_google_cel_cpp = dict( project_name = "Common Expression Language (CEL) C++ library", project_desc = "Common Expression Language (CEL) C++ library", project_url = "https://opensource.google/projects/cel", version = "b9453a09b28a1531c4917e8792b3ea61f6b1a447", sha256 = "cad7d01139947d78e413d112cb8f7431fbb33cf66b0adf9c280824803fc2a72e", strip_prefix = "cel-cpp-{version}", urls = ["https://github.com/google/cel-cpp/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.rbac", "envoy.filters.http.wasm", "envoy.filters.network.rbac", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], last_updated = "2020-07-14", cpe = "N/A", ), com_github_google_flatbuffers = dict( project_name = "FlatBuffers", project_desc = "Cross platform serialization library architected for maximum memory efficiency", project_url = "https://github.com/google/flatbuffers", version = "a83caf5910644ba1c421c002ef68e42f21c15f9f", sha256 = "b8efbc25721e76780752bad775a97c3f77a0250271e2db37fc747b20e8b0f24a", strip_prefix = "flatbuffers-{version}", urls = ["https://github.com/google/flatbuffers/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.wasm", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], last_updated = "2020-07-29", cpe = "N/A", ), com_googlesource_code_re2 = dict( project_name = "RE2", project_desc = "RE2, a regular expression library", project_url = "https://github.com/google/re2", version = "2020-07-06", sha256 = 
"2e9489a31ae007c81e90e8ec8a15d62d58a9c18d4fd1603f6441ef248556b41f", strip_prefix = "re2-{version}", urls = ["https://github.com/google/re2/archive/{version}.tar.gz"], use_category = ["controlplane", "dataplane_core"], last_updated = "2020-07-06", cpe = "N/A", ), # Included to access FuzzedDataProvider.h. This is compiler agnostic but # provided as part of the compiler-rt source distribution. We can't use the # Clang variant as we are not a Clang-LLVM only shop today. org_llvm_releases_compiler_rt = dict( project_name = "compiler-rt", project_desc = "LLVM compiler runtime library", project_url = "https://compiler-rt.llvm.org", version = "10.0.0", sha256 = "6a7da64d3a0a7320577b68b9ca4933bdcab676e898b759850e827333c3282c75", # Only allow peeking at fuzzer related files for now. strip_prefix = "compiler-rt-{version}.src", urls = ["https://github.com/llvm/llvm-project/releases/download/llvmorg-{version}/compiler-rt-{version}.src.tar.xz"], last_updated = "2020-03-24", use_category = ["test_only"], ), upb = dict( project_name = "upb", project_desc = "A small protobuf implementation in C (gRPC dependency)", project_url = "https://github.com/protocolbuffers/upb", version = "8a3ae1ef3e3e3f26b45dec735c5776737fc7247f", sha256 = "e9f281c56ab1eb1f97a80ca8a83bb7ef73d230eabb8591f83876f4e7b85d9b47", strip_prefix = "upb-{version}", urls = ["https://github.com/protocolbuffers/upb/archive/{version}.tar.gz"], use_category = ["controlplane"], last_updated = "2019-11-19", cpe = "N/A", ), kafka_source = dict( project_name = "Kafka (source)", project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", version = "2.4.1", sha256 = "740236f44d66e33ea83382383b4fb7eabdab7093a644b525dd5ec90207f933bd", strip_prefix = "kafka-{version}/clients/src/main/resources/common/message", urls = ["https://github.com/apache/kafka/archive/{version}.zip"], use_category = ["dataplane_ext"], extensions = ["envoy.filters.network.kafka_broker"], last_updated = 
"2020-08-26", cpe = "cpe:2.3:a:apache:kafka:*", ), kafka_server_binary = dict( project_name = "Kafka (server binary)", project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", version = "2.4.1", sha256 = "2177cbd14118999e1d76fec628ca78ace7e6f841219dbc6035027c796bbe1a2a", strip_prefix = "kafka_2.12-{version}", urls = ["https://mirrors.gigenet.com/apache/kafka/{version}/kafka_2.12-{version}.tgz"], last_updated = "2020-08-26", use_category = ["test_only"], ), kafka_python_client = dict( project_name = "Kafka (Python client)", project_desc = "Open-source distributed event streaming platform", project_url = "https://kafka.apache.org", version = "2.0.1", sha256 = "05f7c6eecb402f11fcb7e524c903f1ba1c38d3bdc9bf42bc8ec3cf7567b9f979", strip_prefix = "kafka-python-{version}", urls = ["https://github.com/dpkp/kafka-python/archive/{version}.tar.gz"], last_updated = "2020-08-26", use_category = ["test_only"], ), proxy_wasm_cpp_sdk = dict( project_name = "WebAssembly for Proxies (C++ SDK)", project_desc = "WebAssembly for Proxies (C++ SDK)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk", version = "7afb39d868a973caa6216a535c24e37fb666b6f3", sha256 = "213d0b441bcc3df2c87933b24a593b5fd482fa8f4db158b707c60005b9e70040", strip_prefix = "proxy-wasm-cpp-sdk-{version}", # 2020-09-10 urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.wasm", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], last_updated = "2020-07-29", cpe = "N/A", ), proxy_wasm_cpp_host = dict( project_name = "WebAssembly for Proxies (C++ host implementation)", project_desc = "WebAssembly for Proxies (C++ host implementation)", project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host", # 2020-09-10 version = "49ed20e895b728aae6b811950a2939ecbaf76f7c", sha256 = 
"fa03293d01450b9164f8f56ef9227301f7d1af4f373f996400f75c93f6ebc822", strip_prefix = "proxy-wasm-cpp-host-{version}", urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.wasm", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], last_updated = "2020-07-29", cpe = "N/A", ), # TODO: upgrade to the latest version (1.41 currently fails tests) emscripten_toolchain = dict( project_name = "Emscripten SDK", project_desc = "Emscripten SDK (use by Wasm)", project_url = "https://github.com/emscripten-core/emsdk", version = "1.39", sha256 = "4ac0f1f3de8b3f1373d435cd7e58bd94de4146e751f099732167749a229b443b", patch_cmds = [ "[[ \"$(uname -m)\" == \"x86_64\" ]] && ./emsdk install 1.39.6-upstream && ./emsdk activate --embedded 1.39.6-upstream || true", ], strip_prefix = "emsdk-{version}.6", urls = ["https://github.com/emscripten-core/emsdk/archive/{version}.6.tar.gz"], use_category = ["build"], last_updated = "2020-07-29", ), io_bazel_rules_rust = dict( project_name = "Bazel rust rules", project_desc = "Bazel rust rules (used by Wasm)", project_url = "https://github.com/bazelbuild/rules_rust", version = "fda9a1ce6482973adfda022cadbfa6b300e269c3", sha256 = "484a2b2b67cd2d1fa1054876de7f8d291c4b203fd256bc8cbea14d749bb864ce", # Last commit where "out_binary = True" works. 
# See: https://github.com/bazelbuild/rules_rust/issues/386 strip_prefix = "rules_rust-{version}", urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"], use_category = ["build"], last_updated = "2020-07-29", ), rules_antlr = dict( project_name = "ANTLR Rules for Bazel", project_desc = "Bazel rules for ANTLR", project_url = "https://github.com/marcohu/rules_antlr", version = "3cc2f9502a54ceb7b79b37383316b23c4da66f9a", sha256 = "7249d1569293d9b239e23c65f6b4c81a07da921738bde0dfeb231ed98be40429", strip_prefix = "rules_antlr-{version}", urls = ["https://github.com/marcohu/rules_antlr/archive/{version}.tar.gz"], # This should be "build", but that trips the verification in the docs. use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.wasm", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], last_updated = "2020-07-29", cpe = "N/A", ), antlr4_runtimes = dict( project_name = "ANTLR v4", project_desc = "ANTLR (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files", project_url = "https://github.com/antlr/antlr4", version = "4.7.1", sha256 = "4d0714f441333a63e50031c9e8e4890c78f3d21e053d46416949803e122a6574", strip_prefix = "antlr4-{version}", urls = ["https://github.com/antlr/antlr4/archive/{version}.tar.gz"], use_category = ["dataplane_ext"], extensions = [ "envoy.access_loggers.wasm", "envoy.bootstrap.wasm", "envoy.filters.http.wasm", "envoy.filters.network.wasm", "envoy.stat_sinks.wasm", ], last_updated = "2020-07-29", cpe = "N/A", ), ) def _format_version(s, version): return s.format(version = version, dash_version = version.replace(".", "-"), underscore_version = version.replace(".", "_")) # Interpolate {version} in the above dependency specs. This code should be capable of running in both Python # and Starlark. 
def _dependency_repositories():
    """Return DEPENDENCY_REPOSITORIES_SPEC with version placeholders interpolated.

    For every dependency spec, makes a shallow copy and — when the spec has a
    "version" key — substitutes {version} (and its dash/underscore variants,
    via _format_version) into the "strip_prefix" and "urls" fields.
    Deliberately restricted to dict/list/str operations so it runs in both
    Python and Starlark.
    """
    locations = {}
    for key, location in DEPENDENCY_REPOSITORIES_SPEC.items():
        # Shallow copy so the original spec dict is left untouched.
        mutable_location = dict(location)
        locations[key] = mutable_location

        # Fixup with version information.
        if "version" in location:
            if "strip_prefix" in location:
                mutable_location["strip_prefix"] = _format_version(location["strip_prefix"], location["version"])
            mutable_location["urls"] = [_format_version(url, location["version"]) for url in location["urls"]]
    return locations

DEPENDENCY_REPOSITORIES = _dependency_repositories()

================================================
FILE: bazel/rules_go.patch
================================================
#
# Bazel RBE on Windows GCP workers currently will not invoke cmd.exe batch files correctly
#
# Symptom is program not found 'bazel-out', because of the way that the CreateProcess command
# is constructed by bazel with actions.run with forward slashes, e.g. the command
# cmd.exe /c "bazel-out/host/bin/external/go_sdk/builder.exe.bat"
# where cmd.exe on GCP is treating 'bazel-out' as the target, and /host as a command line switch.
# This problem was not observed on Azure CI pipelines or locally by the developers. The eventual
# fix is not specific to rules_go; this patch simply addresses immediate breakage and can be removed
# See: # - https://github.com/bazelbuild/rules_go/pull/2542 # - https://github.com/envoyproxy/envoy/issues/11657 # diff --git a/go/private/rules/binary.bzl b/go/private/rules/binary.bzl index b88dfd96..e68b5ece 100644 --- a/go/private/rules/binary.bzl +++ b/go/private/rules/binary.bzl @@ -128,8 +128,9 @@ def _go_tool_binary_impl(ctx): content = cmd, ) ctx.actions.run( - executable = bat, - inputs = sdk.libs + sdk.headers + sdk.tools + ctx.files.srcs + [sdk.go], + executable = "cmd.exe", + arguments = ["/S", "/C", bat.path.replace("/", "\\")], + inputs = sdk.libs + sdk.headers + sdk.tools + ctx.files.srcs + [sdk.go, bat], outputs = [cout], env = {"GOROOT": sdk.root_file.dirname}, # NOTE(#2005): avoid realpath in sandbox mnemonic = "GoToolchainBinaryCompile", ================================================ FILE: bazel/setup_clang.sh ================================================ #!/bin/bash BAZELRC_FILE="${BAZELRC_FILE:-$(bazel info workspace)/clang.bazelrc}" LLVM_PREFIX=$1 if [[ ! -e "${LLVM_PREFIX}/bin/llvm-config" ]]; then echo "Error: cannot find llvm-config in ${LLVM_PREFIX}." exit 1 fi PATH="$("${LLVM_PREFIX}"/bin/llvm-config --bindir):${PATH}" export PATH RT_LIBRARY_PATH="$(dirname "$(find "$(llvm-config --libdir)" -name libclang_rt.ubsan_standalone_cxx-x86_64.a | head -1)")" echo "# Generated file, do not edit. If you want to disable clang, just delete this file. 
build:clang --action_env='PATH=${PATH}' build:clang --action_env=CC=clang build:clang --action_env=CXX=clang++ build:clang --action_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' build:clang --repo_env='LLVM_CONFIG=${LLVM_PREFIX}/bin/llvm-config' build:clang --linkopt='-L$(llvm-config --libdir)' build:clang --linkopt='-Wl,-rpath,$(llvm-config --libdir)' build:clang-asan --action_env=ENVOY_UBSAN_VPTR=1 build:clang-asan --copt=-fsanitize=vptr,function build:clang-asan --linkopt=-fsanitize=vptr,function build:clang-asan --linkopt='-L${RT_LIBRARY_PATH}' build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone-x86_64.a build:clang-asan --linkopt=-l:libclang_rt.ubsan_standalone_cxx-x86_64.a " > "${BAZELRC_FILE}" ================================================ FILE: bazel/setup_local_tsan.sh ================================================ #!/bin/bash BAZELRC_FILE="${BAZELRC_FILE:-$(bazel info workspace)/local_tsan.bazelrc}" LIBCXX_PREFIX=$1 if [[ ! -e "${LIBCXX_PREFIX}/lib" ]]; then echo "Error: cannot find /lib in ${LIBCXX_PREFIX}." exit 1 fi echo "# Generated file, do not edit. Delete this file if you no longer use local tsan-instrumented libc++ build:local-tsan --config=libc++ build:local-tsan --config=clang-tsan build:local-tsan --linkopt=-L${LIBCXX_PREFIX}/lib build:local-tsan --linkopt=-Wl,-rpath,${LIBCXX_PREFIX}/lib " > "${BAZELRC_FILE}" ================================================ FILE: bazel/sh_test_wrapper.sh ================================================ #!/bin/bash # Dummy shell implementation for nooping tests. 
# TODO(lizan): remove when we have a solution for # https://github.com/bazelbuild/bazel/issues/3510 cd "$(dirname "$0")" || exit 1 if [ $# -gt 0 ]; then "./${1}" "${@:2}" fi ================================================ FILE: bazel/tclap-win64-ull-sizet.patch ================================================ diff --git a/include/tclap/StandardTraits.h b/include/tclap/StandardTraits.h index 46d7f6f..117057b 100644 --- a/include/tclap/StandardTraits.h +++ b/include/tclap/StandardTraits.h @@ -123,8 +123,9 @@ struct ArgTraits { typedef ValueLike ValueCategory; }; -// Microsoft implements size_t awkwardly. -#if defined(_MSC_VER) && defined(_M_X64) +// Microsoft implements size_t awkwardly. +// Studio 2005 introduces unsigned long long, which conflicts with the size_t template +#if defined(_MSC_VER) && (_MSC_VER < 1400) && defined(_M_X64) /** * size_ts have value-like semantics. */ ================================================ FILE: bazel/test/BUILD ================================================ licenses(["notice"]) # Apache 2 exports_files(["verify_tap_test.sh"]) ================================================ FILE: bazel/test/verify_tap_test.sh ================================================ #!/bin/bash set -ex # Clear existing tap directory if previous run wasn't in sandbox rm -rf tap mkdir -p tap TAP_TMP="$(realpath tap)" TAP_PATH="${TAP_TMP}/tap" "$@" # TODO(htuch): Check for pcap, now CI (with or without RBE) does have # enough capabilities. # Verify that some pb_text files have been created. ls -l "${TAP_TMP}"/tap_*.pb_text > /dev/null ================================================ FILE: bazel/test_for_benchmark_wrapper.sh ================================================ #!/bin/bash # Set the benchmark time to 0 to just verify that the benchmark runs to # completion. We're interacting with two different flag parsers, so the order # of flags and the -- matters. 
"${TEST_SRCDIR}/envoy/${1}" "${@:2}" --skip_expensive_benchmarks -- --benchmark_min_time=0 ================================================ FILE: bazel/toolchains/BUILD ================================================ licenses(["notice"]) # Apache 2 platform( name = "rbe_ubuntu_clang_platform", parents = ["@rbe_ubuntu_clang//config:platform"], remote_execution_properties = """ {PARENT_REMOTE_EXECUTION_PROPERTIES} properties: { name: "dockerAddCapabilities" value: "SYS_PTRACE,NET_RAW,NET_ADMIN" } properties: { name: "dockerNetwork" value: "standard" } """, ) ================================================ FILE: bazel/upb.patch ================================================ # https://github.com/protocolbuffers/upb/pull/226 diff --git a/bazel/upb_proto_library.bzl b/bazel/upb_proto_library.bzl index f148745be..21ed34b48 100644 --- a/bazel/upb_proto_library.bzl +++ b/bazel/upb_proto_library.bzl @@ -8,7 +8,7 @@ load("@bazel_tools//tools/cpp:toolchain_utils.bzl", "find_cpp_toolchain") # copybara:strip_for_google3_begin load("@bazel_skylib//lib:versions.bzl", "versions") -load("@bazel_version//:bazel_version.bzl", "bazel_version") +load("@upb_bazel_version//:bazel_version.bzl", "bazel_version") # copybara:strip_end # Generic support code ######################################################### diff --git a/bazel/workspace_deps.bzl b/bazel/workspace_deps.bzl index 39bf524a7..aabbc3411 100644 --- a/bazel/workspace_deps.bzl +++ b/bazel/workspace_deps.bzl @@ -5,7 +5,7 @@ load("//bazel:repository_defs.bzl", "bazel_version_repository") def upb_deps(): bazel_version_repository( - name = "bazel_version", + name = "upb_bazel_version", ) git_repository( ================================================ FILE: bazel/wasm/BUILD ================================================ licenses(["notice"]) # Apache 2 ================================================ FILE: bazel/wasm/wasm.bzl ================================================ load("@io_bazel_rules_rust//rust:rust.bzl", 
"rust_binary") load("@rules_cc//cc:defs.bzl", "cc_binary") def _wasm_cc_transition_impl(settings, attr): return { "//command_line_option:cpu": "wasm32", "//command_line_option:crosstool_top": "@proxy_wasm_cpp_sdk//toolchain:emscripten", # Overriding copt/cxxopt/linkopt to prevent sanitizers/coverage options leak # into WASM build configuration "//command_line_option:copt": [], "//command_line_option:cxxopt": [], "//command_line_option:linkopt": [], "//command_line_option:collect_code_coverage": "false", "//command_line_option:fission": "no", } def _wasm_rust_transition_impl(settings, attr): return { "//command_line_option:platforms": "@io_bazel_rules_rust//rust/platform:wasm", } wasm_cc_transition = transition( implementation = _wasm_cc_transition_impl, inputs = [], outputs = [ "//command_line_option:cpu", "//command_line_option:crosstool_top", "//command_line_option:copt", "//command_line_option:cxxopt", "//command_line_option:fission", "//command_line_option:linkopt", "//command_line_option:collect_code_coverage", ], ) wasm_rust_transition = transition( implementation = _wasm_rust_transition_impl, inputs = [], outputs = [ "//command_line_option:platforms", ], ) def _wasm_binary_impl(ctx): out = ctx.actions.declare_file(ctx.label.name) if ctx.attr.precompile: ctx.actions.run( executable = ctx.executable._compile_tool, arguments = [ctx.files.binary[0].path, out.path], outputs = [out], inputs = ctx.files.binary, ) else: ctx.actions.run( executable = "cp", arguments = [ctx.files.binary[0].path, out.path], outputs = [out], inputs = ctx.files.binary, ) return [DefaultInfo(files = depset([out]), runfiles = ctx.runfiles([out]))] def _wasm_attrs(transition): return { "binary": attr.label(mandatory = True, cfg = transition), "precompile": attr.bool(default = False), # This is deliberately in target configuration to avoid compiling v8 twice. 
"_compile_tool": attr.label(default = "@envoy//test/tools/wee8_compile:wee8_compile_tool", executable = True, cfg = "target"), "_whitelist_function_transition": attr.label(default = "@bazel_tools//tools/whitelists/function_transition_whitelist"), } # WASM binary rule implementation. # This copies the binary specified in binary attribute in WASM configuration to # target configuration, so a binary in non-WASM configuration can depend on them. wasm_cc_binary_rule = rule( implementation = _wasm_binary_impl, attrs = _wasm_attrs(wasm_cc_transition), ) wasm_rust_binary_rule = rule( implementation = _wasm_binary_impl, attrs = _wasm_attrs(wasm_rust_transition), ) def wasm_cc_binary(name, tags = [], repository = "", **kwargs): wasm_name = "_wasm_" + name kwargs.setdefault("additional_linker_inputs", ["@proxy_wasm_cpp_sdk//:jslib", "@envoy//source/extensions/common/wasm/ext:jslib"]) if repository == "@envoy": envoy_js = "--js-library source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js" else: envoy_js = "--js-library external/envoy/source/extensions/common/wasm/ext/envoy_wasm_intrinsics.js" kwargs.setdefault("linkopts", [ envoy_js, "--js-library external/proxy_wasm_cpp_sdk/proxy_wasm_intrinsics.js", ]) kwargs.setdefault("visibility", ["//visibility:public"]) cc_binary( name = wasm_name, # Adding manual tag it won't be built in non-WASM (e.g. x86_64 config) # when an wildcard is specified, but it will be built in WASM configuration # when the wasm_binary below is built. 
tags = ["manual"], **kwargs ) wasm_cc_binary_rule( name = name, binary = ":" + wasm_name, tags = tags + ["manual"], ) def envoy_wasm_cc_binary(name, tags = [], **kwargs): wasm_cc_binary(name, tags, repository = "@envoy", **kwargs) def wasm_rust_binary(name, tags = [], **kwargs): wasm_name = "_wasm_" + (name if not ".wasm" in name else name.strip(".wasm")) kwargs.setdefault("visibility", ["//visibility:public"]) rust_binary( name = wasm_name, edition = "2018", crate_type = "cdylib", out_binary = True, tags = ["manual"], **kwargs ) wasm_rust_binary_rule( name = name, precompile = select({ "@envoy//bazel:linux_x86_64": True, "//conditions:default": False, }), binary = ":" + wasm_name, tags = tags + ["manual"], ) ================================================ FILE: ci/Dockerfile-envoy ================================================ ARG BUILD_OS=ubuntu ARG BUILD_TAG=18.04 # Build stage FROM buildpack-deps:$BUILD_TAG as build RUN echo "d6c40440609a23483f12eb6295b5191e94baf08298a856bab6e15b10c3b82891 /tmp/su-exec.c" > /tmp/checksum \ && curl -o /tmp/su-exec.c https://raw.githubusercontent.com/ncopa/su-exec/212b75144bbc06722fbd7661f651390dc47a43d1/su-exec.c \ && sha256sum -c /tmp/checksum \ && gcc -Wall /tmp/su-exec.c -o/usr/local/bin/su-exec \ && chown root:root /usr/local/bin/su-exec \ && chmod 0755 /usr/local/bin/su-exec # Final stage FROM $BUILD_OS:$BUILD_TAG ARG TARGETPLATFORM RUN apt-get update && apt-get upgrade -y \ && apt-get install --no-install-recommends -y ca-certificates \ && apt-get autoremove -y && apt-get clean \ && rm -rf /tmp/* /var/tmp/* \ && rm -rf /var/lib/apt/lists/* COPY --from=build /usr/local/bin/su-exec /usr/local/bin/su-exec RUN adduser --group --system envoy RUN mkdir -p /etc/envoy ARG ENVOY_BINARY_SUFFIX=_stripped ADD ${TARGETPLATFORM}/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml EXPOSE 10000 COPY ci/docker-entrypoint.sh / ENTRYPOINT ["/docker-entrypoint.sh"] CMD ["envoy", 
"-c", "/etc/envoy/envoy.yaml"] ================================================ FILE: ci/Dockerfile-envoy-alpine ================================================ FROM frolvlad/alpine-glibc:alpine-3.12_glibc-2.31 RUN mkdir -p /etc/envoy ADD configs/google_com_proxy.v2.yaml /etc/envoy/envoy.yaml RUN apk add --no-cache shadow su-exec \ && addgroup -S envoy && adduser --no-create-home -S envoy -G envoy ARG ENVOY_BINARY_SUFFIX=_stripped ADD linux/amd64/build_release${ENVOY_BINARY_SUFFIX}/* /usr/local/bin/ EXPOSE 10000 COPY ci/docker-entrypoint.sh / ENTRYPOINT ["/docker-entrypoint.sh"] CMD ["envoy", "-c", "/etc/envoy/envoy.yaml"] ================================================ FILE: ci/Dockerfile-envoy-google-vrp ================================================ ARG ENVOY_VRP_BASE_IMAGE FROM $ENVOY_VRP_BASE_IMAGE RUN apt-get update \ && apt-get upgrade -y \ && apt-get install -y libc++1 supervisor gdb strace tshark \ && apt-get autoremove -y \ && apt-get clean \ && rm -rf /tmp/* /var/tmp/* \ && rm -rf /var/lib/apt/lists/* ADD configs/google-vrp/envoy-edge.yaml /etc/envoy/envoy-edge.yaml ADD configs/google-vrp/envoy-origin.yaml /etc/envoy/envoy-origin.yaml ADD configs/google-vrp/launch_envoy.sh /usr/local/bin/launch_envoy.sh ADD configs/google-vrp/supervisor.conf /etc/supervisor.conf ADD test/config/integration/certs/serverkey.pem /etc/envoy/certs/serverkey.pem ADD test/config/integration/certs/servercert.pem /etc/envoy/certs/servercert.pem # ADD %local envoy bin% /usr/local/bin/envoy EXPOSE 10000 EXPOSE 10001 CMD ["supervisord", "-c", "/etc/supervisor.conf"] ================================================ FILE: ci/README.md ================================================ # Developer use of CI Docker images There are two available flavors of Envoy Docker images for Linux, based on Ubuntu and Alpine Linux and an image based on Windows2019. 
## Ubuntu Envoy image The Ubuntu based Envoy Docker image at [`envoyproxy/envoy-build:`](https://hub.docker.com/r/envoyproxy/envoy-build/) is used for CircleCI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). Developers may work with the latest build image SHA in [envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools/blob/master/toolchains/rbe_toolchains_config.bzl#L8) repo to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Ubuntu Envoy image. Moreover, the Docker image at [`envoyproxy/envoy-dev:`](https://hub.docker.com/r/envoyproxy/envoy-dev/) is an image that has an Envoy binary at `/usr/local/bin/envoy`. The `` corresponds to the master commit at which the binary was compiled. Lastly, `envoyproxy/envoy-dev:latest` contains an Envoy binary built from the latest tip of master that passed tests. ## Alpine Envoy image Minimal images based on Alpine Linux allow for quicker deployment of Envoy. Two Alpine based images are built, one with an Envoy binary with debug (`envoyproxy/envoy-alpine-debug`) symbols and one stripped of them (`envoyproxy/envoy-alpine`). Both images are pushed with two different tags: `` and `latest`. Parallel to the Ubuntu images above, `` corresponds to the master commit at which the binary was compiled, and `latest` corresponds to a binary built from the latest tip of master that passed tests. ## Windows 2019 Envoy image The Windows 2019 based Envoy Docker image at [`envoyproxy/envoy-build-windows2019:`](https://hub.docker.com/r/envoyproxy/envoy-build-windows2019/) is used for CI checks, where `` is specified in [`envoy_build_sha.sh`](https://github.com/envoyproxy/envoy/blob/master/ci/envoy_build_sha.sh). 
Developers may work with the most recent `envoyproxy/envoy-build-windows2019` image to provide a self-contained environment for building Envoy binaries and running tests that reflects the latest built Windows 2019 Envoy image. # Build image base and compiler versions Currently there are three build images for Linux and one for Windows: * `envoyproxy/envoy-build` — alias to `envoyproxy/envoy-build-ubuntu`. * `envoyproxy/envoy-build-ubuntu` — based on Ubuntu 18.04 (Bionic) with GCC 9 and Clang 10 compiler. * `envoyproxy/envoy-build-centos` — based on CentOS 7 with GCC 9 and Clang 10 compiler, this image is experimental and not well tested. * `envoyproxy/envoy-build-windows2019` — based on Windows 2019 LTS with VS 2019 Build Tools. The source for these images is located in the [envoyproxy/envoy-build-tools](https://github.com/envoyproxy/envoy-build-tools) repository. We use the Clang compiler for all Linux CI runs with tests. We have an additional Linux CI run with GCC which builds binary only. # C++ standard library As of November 2019 after [#8859](https://github.com/envoyproxy/envoy/pull/8859) the official released binary is [linked against libc++ on Linux](https://github.com/envoyproxy/envoy/blob/master/bazel/README.md#linking-against-libc-on-linux). To override the C++ standard library in your build, set environment variable `ENVOY_STDLIB` to `libstdc++` or `libc++` and run `./ci/do_ci.sh` as described below. # Building and running tests as a developer The `./ci/run_envoy_docker.sh` script can be used to set up a Docker container on Linux and Windows to build an Envoy static binary and run tests. The build image defaults to `envoyproxy/envoy-build-ubuntu` on Linux and `envoyproxy/envoy-build-windows2019` on Windows, but you can choose build image by setting `IMAGE_NAME` in the environment. In case your setup is behind a proxy, set `http_proxy` and `https_proxy` to the proxy servers before invoking the build. 
```bash IMAGE_NAME=envoyproxy/envoy-build-ubuntu http_proxy=http://proxy.foo.com:8080 https_proxy=http://proxy.bar.com:8080 ./ci/run_envoy_docker.sh ' ``` ## On Linux An example basic invocation to build a developer version of the Envoy static binary (using the Bazel `fastbuild` type) is: ```bash ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' ``` The Envoy binary can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy-fastbuild` on the Docker host. You can control this by setting `ENVOY_DOCKER_BUILD_DIR` in the environment, e.g. to generate the binary in `~/build/envoy/source/exe/envoy-fastbuild` you can run: ```bash ENVOY_DOCKER_BUILD_DIR=~/build ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' ``` For a release version of the Envoy binary you can run: ```bash ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release.server_only' ``` The build artifact can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy` (or wherever `$ENVOY_DOCKER_BUILD_DIR` points). For a debug version of the Envoy binary you can run: ```bash ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.debug.server_only' ``` The build artifact can be found in `/tmp/envoy-docker-build/envoy/source/exe/envoy-debug` (or wherever `$ENVOY_DOCKER_BUILD_DIR` points). To leverage a [bazel remote cache](https://github.com/envoyproxy/envoy/tree/master/bazel#advanced-caching-setup) add the http_remote_cache endpoint to the BAZEL_BUILD_EXTRA_OPTIONS environment variable ```bash ./ci/run_envoy_docker.sh "BAZEL_BUILD_EXTRA_OPTIONS='--remote_http_cache=http://127.0.0.1:28080' ./ci/do_ci.sh bazel.release" ``` The `./ci/run_envoy_docker.sh './ci/do_ci.sh '` targets are: * `bazel.api` — build and run API tests under `-c fastbuild` with clang. * `bazel.asan` — build and run tests under `-c dbg --config=clang-asan` with clang. * `bazel.asan ` — build and run a specified test or test dir under `-c dbg --config=clang-asan` with clang. * `bazel.debug` — build Envoy static binary and run tests under `-c dbg`. 
* `bazel.debug ` — build Envoy static binary and run a specified test or test dir under `-c dbg`. * `bazel.debug.server_only` — build Envoy static binary under `-c dbg`. * `bazel.dev` — build Envoy static binary and run tests under `-c fastbuild` with clang. * `bazel.dev ` — build Envoy static binary and run a specified test or test dir under `-c fastbuild` with clang. * `bazel.release` — build Envoy static binary and run tests under `-c opt` with clang. * `bazel.release ` — build Envoy static binary and run a specified test or test dir under `-c opt` with clang. * `bazel.release.server_only` — build Envoy static binary under `-c opt` with clang. * `bazel.sizeopt` — build Envoy static binary and run tests under `-c opt --config=sizeopt` with clang. * `bazel.sizeopt ` — build Envoy static binary and run a specified test or test dir under `-c opt --config=sizeopt` with clang. * `bazel.sizeopt.server_only` — build Envoy static binary under `-c opt --config=sizeopt` with clang. * `bazel.coverage` — build and run tests under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`. * `bazel.coverage ` — build and run a specified test or test dir under `-c dbg` with gcc, generating coverage information in `$ENVOY_DOCKER_BUILD_DIR/envoy/generated/coverage/coverage.html`. * `bazel.coverity` — build Envoy static binary and run Coverity Scan static analysis. * `bazel.msan` — build and run tests under `-c dbg --config=clang-msan` with clang. * `bazel.msan ` — build and run a specified test or test dir under `-c dbg --config=clang-msan` with clang. * `bazel.tsan` — build and run tests under `-c dbg --config=clang-tsan` with clang. * `bazel.tsan ` — build and run a specified test or test dir under `-c dbg --config=clang-tsan` with clang. * `bazel.fuzz` — build and run fuzz tests under `-c dbg --config=asan-fuzzer` with clang. 
* `bazel.fuzz ` — build and run a specified fuzz test or test dir under `-c dbg --config=asan-fuzzer` with clang. If specifying a single fuzz test, must use the full target name with "_with_libfuzzer" for ``. * `bazel.compile_time_options` — build Envoy and run tests with various compile-time options toggled to their non-default state, to ensure they still build. * `bazel.compile_time_options ` — build Envoy and run a specified test or test dir with various compile-time options toggled to their non-default state, to ensure they still build. * `bazel.clang_tidy ` — build and run clang-tidy specified source files, if no files specified, runs against the diff with the last GitHub commit. * `check_format`— run `clang-format` and `buildifier` on entire source tree. * `fix_format`— run and enforce `clang-format` and `buildifier` on entire source tree. * `check_spelling`— run `misspell` on entire project. * `fix_spelling`— run and enforce `misspell` on entire project. * `check_spelling_pedantic`— run `aspell` on C++ and proto comments. * `docs`— build documentation tree in `generated/docs`. ## On Windows An example basic invocation to build the Envoy static binary and run tests is: ```bash ./ci/run_envoy_docker.sh './ci/windows_ci_steps.sh' ``` You can modify `./ci/windows_ci_steps.sh` to modify `bazel` arguments, tests to run, etc. as well as set environment variables to adjust your container build environment as described above. The Envoy binary can be found in `C:\Windows\Temp\envoy-docker-build\envoy\source\exe` on the Docker host. You can control this by setting `ENVOY_DOCKER_BUILD_DIR` in the environment, e.g. to generate the binary in `C:\Users\foo\build\envoy\source\exe` you can run: ```bash ENVOY_DOCKER_BUILD_DIR="C:\Users\foo\build" ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev' ``` Note the quotations around the `ENVOY_DOCKER_BUILD_DIR` value to preserve the backslashes in the path. 
If you would like to run an interactive session to keep the build container running (to persist your local build environment), run: ```bash ./ci/run_envoy_docker.sh 'bash' ``` From an interactive session, you can invoke `bazel` manually or use the `./ci/windows_ci_steps.sh` script to build and run tests. # Testing changes to the build image as a developer While all changes to the build image should eventually be upstreamed, it can be useful to test those changes locally before sending out a pull request. To experiment with a local clone of the upstream build image you can make changes to files such as build_container.sh locally and then run: ```bash DISTRO=ubuntu cd ci/build_container LINUX_DISTRO="${DISTRO}" CIRCLE_SHA1=my_tag ./docker_build.sh # Wait patiently for quite some time cd ../.. IMAGE_NAME="envoyproxy/envoy-build-${DISTRO}" IMAGE_ID=my_tag ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.whatever' ``` This build the Ubuntu based `envoyproxy/envoy-build-ubuntu` image, and the final call will run against your local copy of the build image. # macOS Build Flow The macOS CI build is part of the [CircleCI](https://circleci.com/gh/envoyproxy/envoy) workflow. Dependencies are installed by the `ci/mac_ci_setup.sh` script, via [Homebrew](https://brew.sh), which is pre-installed on the CircleCI macOS image. The dependencies are cached are re-installed on every build. The `ci/mac_ci_steps.sh` script executes the specific commands that build and test Envoy. Note that the full version of Xcode (not just Command Line Tools) is required. # Coverity Scan Build Flow [Coverity Scan Envoy Project](https://scan.coverity.com/projects/envoy-proxy) Coverity Scan static analysis is not run within Envoy CI. However, Envoy can be locally built and submitted for analysis. A Coverity Scan Envoy project token must be generated from the [Coverity Project Settings](https://scan.coverity.com/projects/envoy-proxy?tab=project_settings). 
Only a Coverity Project Administrator can create a token. With this token, running `ci/do_coverity_local.sh` will use the Ubuntu based `envoyproxy/envoy-build-ubuntu` image to build the Envoy static binary with the Coverity Scan tool chain. This process generates an artifact, envoy-coverity-output.tgz, that is uploaded to Coverity for static analysis. To build and submit for analysis: ```bash COVERITY_TOKEN={generated Coverity project token} ./ci/do_coverity_local.sh ``` ================================================ FILE: ci/WORKSPACE.filter.example ================================================ workspace(name = "envoy_filter_example") local_repository( name = "envoy", path = "{ENVOY_SRCDIR}", ) load("@envoy//bazel:api_binding.bzl", "envoy_api_binding") envoy_api_binding() load("@envoy//bazel:api_repositories.bzl", "envoy_api_dependencies") envoy_api_dependencies() load("@envoy//bazel:repositories.bzl", "envoy_dependencies") envoy_dependencies() load("@envoy//bazel:repositories_extra.bzl", "envoy_dependencies_extra") envoy_dependencies_extra() load("@envoy//bazel:dependency_imports.bzl", "envoy_dependency_imports") envoy_dependency_imports() ================================================ FILE: ci/api_mirror.sh ================================================ #!/bin/bash set -e CHECKOUT_DIR=../data-plane-api if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] then echo "Cloning..." 
git clone git@github.com:envoyproxy/data-plane-api "$CHECKOUT_DIR" git -C "$CHECKOUT_DIR" config user.name "data-plane-api(CircleCI)" git -C "$CHECKOUT_DIR" config user.email data-plane-api@users.noreply.github.com git -C "$CHECKOUT_DIR" fetch git -C "$CHECKOUT_DIR" checkout -B master origin/master # Determine last envoyproxy/envoy SHA in envoyproxy/data-plane-api MIRROR_MSG="Mirrored from https://github.com/envoyproxy/envoy" LAST_ENVOY_SHA=$(git -C "$CHECKOUT_DIR" log --grep="$MIRROR_MSG" -n 1 | grep "$MIRROR_MSG" | \ tail -n 1 | sed -e "s#.*$MIRROR_MSG @ ##") echo "Last mirrored envoyproxy/envoy SHA is $LAST_ENVOY_SHA" # Compute SHA sequence to replay in envoyproxy/data-plane-api SHAS=$(git rev-list --reverse "$LAST_ENVOY_SHA"..HEAD api/) # For each SHA, hard reset, rsync api/ and generate commit in # envoyproxy/data-plane-api API_WORKING_DIR="../envoy-api-mirror" git worktree add "$API_WORKING_DIR" for sha in $SHAS do git -C "$API_WORKING_DIR" reset --hard "$sha" COMMIT_MSG=$(git -C "$API_WORKING_DIR" log --format=%B -n 1) QUALIFIED_COMMIT_MSG=$(echo -e "$COMMIT_MSG\n\n$MIRROR_MSG @ $sha") rsync -acv --delete --exclude "ci/" --exclude ".*" --exclude LICENSE \ "$API_WORKING_DIR"/api/ "$CHECKOUT_DIR"/ git -C "$CHECKOUT_DIR" add . git -C "$CHECKOUT_DIR" commit -m "$QUALIFIED_COMMIT_MSG" done echo "Pushing..." git -C "$CHECKOUT_DIR" push origin master echo "Done" fi ================================================ FILE: ci/build_setup.sh ================================================ #!/bin/bash # Configure environment variables for Bazel build and test. 
set -e export PPROF_PATH=/thirdparty_build/bin/pprof [ -z "${NUM_CPUS}" ] && NUM_CPUS=$(grep -c ^processor /proc/cpuinfo) [ -z "${ENVOY_SRCDIR}" ] && export ENVOY_SRCDIR=/source [ -z "${ENVOY_BUILD_TARGET}" ] && export ENVOY_BUILD_TARGET=//source/exe:envoy-static [ -z "${ENVOY_BUILD_DEBUG_INFORMATION}" ] && export ENVOY_BUILD_DEBUG_INFORMATION=//source/exe:envoy-static.dwp [ -z "${ENVOY_BUILD_ARCH}" ] && { ENVOY_BUILD_ARCH=$(uname -m) export ENVOY_BUILD_ARCH } read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}" read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}" read -ra BAZEL_OPTIONS <<< "${BAZEL_OPTIONS:-}" echo "ENVOY_SRCDIR=${ENVOY_SRCDIR}" echo "ENVOY_BUILD_TARGET=${ENVOY_BUILD_TARGET}" echo "ENVOY_BUILD_ARCH=${ENVOY_BUILD_ARCH}" function setup_gcc_toolchain() { if [[ -n "${ENVOY_STDLIB}" && "${ENVOY_STDLIB}" != "libstdc++" ]]; then echo "gcc toolchain doesn't support ${ENVOY_STDLIB}." exit 1 fi if [[ -z "${ENVOY_RBE}" ]]; then export CC=gcc export CXX=g++ export BAZEL_COMPILER=gcc echo "$CC/$CXX toolchain configured" else BAZEL_BUILD_OPTIONS=("--config=remote-gcc" "${BAZEL_BUILD_OPTIONS[@]}") fi } function setup_clang_toolchain() { ENVOY_STDLIB="${ENVOY_STDLIB:-libc++}" if [[ -z "${ENVOY_RBE}" ]]; then if [[ "${ENVOY_STDLIB}" == "libc++" ]]; then BAZEL_BUILD_OPTIONS=("--config=libc++" "${BAZEL_BUILD_OPTIONS[@]}") else BAZEL_BUILD_OPTIONS=("--config=clang" "${BAZEL_BUILD_OPTIONS[@]}") fi else if [[ "${ENVOY_STDLIB}" == "libc++" ]]; then BAZEL_BUILD_OPTIONS=("--config=remote-clang-libc++" "${BAZEL_BUILD_OPTIONS[@]}") else BAZEL_BUILD_OPTIONS=("--config=remote-clang" "${BAZEL_BUILD_OPTIONS[@]}") fi fi echo "clang toolchain with ${ENVOY_STDLIB} configured" } export BUILD_DIR=${BUILD_DIR:-/build} if [[ ! -d "${BUILD_DIR}" ]] then echo "${BUILD_DIR} mount missing - did you forget -v :${BUILD_DIR}? Creating." mkdir -p "${BUILD_DIR}" fi # Environment setup. 
export TEST_TMPDIR=${BUILD_DIR}/tmp export PATH=/opt/llvm/bin:${PATH} export CLANG_FORMAT="${CLANG_FORMAT:-clang-format}" if [[ -f "/etc/redhat-release" ]]; then BAZEL_BUILD_EXTRA_OPTIONS+=("--copt=-DENVOY_IGNORE_GLIBCXX_USE_CXX11_ABI_ERROR=1") fi function cleanup() { # Remove build artifacts. This doesn't mess with incremental builds as these # are just symlinks. rm -rf "${ENVOY_SRCDIR}"/bazel-* clang.bazelrc } cleanup trap cleanup EXIT export LLVM_ROOT="${LLVM_ROOT:-/opt/llvm}" "$(dirname "$0")"/../bazel/setup_clang.sh "${LLVM_ROOT}" [[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=("--nocache_test_results") # TODO(phlax): deprecate/remove this - i believe it was made redundant here: # https://github.com/envoyproxy/envoy/commit/3ebedeb708a23062332a6fcdf33b462b7070adba#diff-2fa22a1337effee365a51e6844be0ab3 export BAZEL_QUERY_OPTIONS="${BAZEL_OPTIONS[*]}" # Use https://docs.bazel.build/versions/master/command-line-reference.html#flag--experimental_repository_cache_hardlinks # to save disk space. BAZEL_BUILD_OPTIONS=( "${BAZEL_OPTIONS[@]}" "--verbose_failures" "--show_task_finish" "--experimental_generate_json_trace_profile" "--test_output=errors" "--repository_cache=${BUILD_DIR}/repository_cache" "--experimental_repository_cache_hardlinks" "${BAZEL_BUILD_EXTRA_OPTIONS[@]}" "${BAZEL_EXTRA_TEST_OPTIONS[@]}") [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]] && BAZEL_BUILD_OPTIONS+=( "--define" "wasm=disabled" "--flaky_test_attempts=2" "--test_env=HEAPCHECK=") [[ "${BAZEL_EXPUNGE}" == "1" ]] && bazel clean --expunge # Also setup some space for building Envoy standalone. export ENVOY_BUILD_DIR="${BUILD_DIR}"/envoy mkdir -p "${ENVOY_BUILD_DIR}" # This is where we copy build deliverables to. export ENVOY_DELIVERY_DIR="${ENVOY_BUILD_DIR}"/source/exe mkdir -p "${ENVOY_DELIVERY_DIR}" # This is where we copy the coverage report to. export ENVOY_COVERAGE_ARTIFACT="${ENVOY_BUILD_DIR}"/generated/coverage.tar.gz # This is where we copy the fuzz coverage report to. 
export ENVOY_FUZZ_COVERAGE_ARTIFACT="${ENVOY_BUILD_DIR}"/generated/fuzz_coverage.tar.gz # This is where we dump failed test logs for CI collection. export ENVOY_FAILED_TEST_LOGS="${ENVOY_BUILD_DIR}"/generated/failed-testlogs mkdir -p "${ENVOY_FAILED_TEST_LOGS}" # This is where we copy the build profile to. export ENVOY_BUILD_PROFILE="${ENVOY_BUILD_DIR}"/generated/build-profile mkdir -p "${ENVOY_BUILD_PROFILE}" export BUILDIFIER_BIN="${BUILDIFIER_BIN:-/usr/local/bin/buildifier}" export BUILDOZER_BIN="${BUILDOZER_BIN:-/usr/local/bin/buildozer}" # We set up an Envoy consuming project for test builds only if '-nofetch' # is not set AND this is an Envoy build. For derivative builds where Envoy # source tree is different than the current workspace, the setup step is # skipped. if [[ "$1" != "-nofetch" && "${ENVOY_SRCDIR}" == "$(bazel info workspace)" ]]; then # shellcheck source=ci/filter_example_setup.sh . "$(dirname "$0")"/filter_example_setup.sh else echo "Skip setting up Envoy Filter Example." fi export ENVOY_BUILD_FILTER_EXAMPLE="${FILTER_WORKSPACE_SET:-0}" ================================================ FILE: ci/check_and_fix_format.sh ================================================ #!/bin/bash set -e DIFF_OUTPUT="${DIFF_OUTPUT:-/build/fix_format.diff}" # We set this for two reasons. First, we want to ensure belt-and-braces that we check these formats # in CI in case the skip-on-file-change heuristics in proto_format.sh etc. are buggy. Second, this # prevents AZP cache weirdness. export FORCE_PROTO_FORMAT=yes export FORCE_PYTHON_FORMAT=yes function fix { set +e ci/do_ci.sh fix_format ci/do_ci.sh fix_spelling ci/do_ci.sh fix_spelling_pedantic echo "Format check failed, try apply following patch to fix:" git add api git diff HEAD | tee "${DIFF_OUTPUT}" exit 1 } # If any of the checks fail, run the fix function above. 
trap fix ERR ci/do_ci.sh check_format ci/do_ci.sh check_repositories ci/do_ci.sh check_spelling ci/do_ci.sh check_spelling_pedantic ================================================ FILE: ci/do_ci.sh ================================================ #!/bin/bash # Run a CI build/test target, e.g. docs, asan. set -e build_setup_args="" if [[ "$1" == "fix_format" || "$1" == "check_format" || "$1" == "check_repositories" || \ "$1" == "check_spelling" || "$1" == "fix_spelling" || "$1" == "bazel.clang_tidy" || \ "$1" == "check_spelling_pedantic" || "$1" == "fix_spelling_pedantic" ]]; then build_setup_args="-nofetch" fi SRCDIR="${PWD}" NO_BUILD_SETUP="${NO_BUILD_SETUP:-}" if [[ -z "$NO_BUILD_SETUP" ]]; then # shellcheck source=ci/setup_cache.sh . "$(dirname "$0")"/setup_cache.sh # shellcheck source=ci/build_setup.sh . "$(dirname "$0")"/build_setup.sh $build_setup_args fi cd "${SRCDIR}" if [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]]; then BUILD_ARCH_DIR="/linux/amd64" elif [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then BUILD_ARCH_DIR="/linux/arm64" else # Fall back to use the ENVOY_BUILD_ARCH itself. 
BUILD_ARCH_DIR="/linux/${ENVOY_BUILD_ARCH}" fi echo "building using ${NUM_CPUS} CPUs" echo "building for ${ENVOY_BUILD_ARCH}" function collect_build_profile() { declare -g build_profile_count=${build_profile_count:-1} mv -f "$(bazel info output_base)/command.profile.gz" "${ENVOY_BUILD_PROFILE}/${build_profile_count}-$1.profile.gz" || true ((build_profile_count++)) } function bazel_with_collection() { local failed_logs declare -r BAZEL_OUTPUT="${ENVOY_SRCDIR}"/bazel.output.txt bazel "$@" | tee "${BAZEL_OUTPUT}" declare BAZEL_STATUS="${PIPESTATUS[0]}" if [ "${BAZEL_STATUS}" != "0" ] then pushd bazel-testlogs failed_logs=$(grep " /build.*test.log" "${BAZEL_OUTPUT}" | sed -e 's/ \/build.*\/testlogs\/\(.*\)/\1/') while read -r f; do cp --parents -f "$f" "${ENVOY_FAILED_TEST_LOGS}" done <<< "$failed_logs" popd exit "${BAZEL_STATUS}" fi collect_build_profile "$1" run_process_test_result } function cp_binary_for_outside_access() { DELIVERY_LOCATION="$1" cp -f \ bazel-bin/"${ENVOY_BIN}" \ "${ENVOY_DELIVERY_DIR}"/"${DELIVERY_LOCATION}" } function cp_debug_info_for_outside_access() { DELIVERY_LOCATION="$1" cp -f \ bazel-bin/"${ENVOY_BIN}".dwp \ "${ENVOY_DELIVERY_DIR}"/"${DELIVERY_LOCATION}".dwp } function cp_binary_for_image_build() { # TODO(mattklein123): Replace this with caching and a different job which creates images. local BASE_TARGET_DIR="${ENVOY_SRCDIR}${BUILD_ARCH_DIR}" echo "Copying binary for image build..." 
COMPILE_TYPE="$2" mkdir -p "${BASE_TARGET_DIR}"/build_"$1" cp -f "${ENVOY_DELIVERY_DIR}"/envoy "${BASE_TARGET_DIR}"/build_"$1" if [[ "${COMPILE_TYPE}" == "dbg" || "${COMPILE_TYPE}" == "opt" ]]; then cp -f "${ENVOY_DELIVERY_DIR}"/envoy.dwp "${BASE_TARGET_DIR}"/build_"$1" fi mkdir -p "${BASE_TARGET_DIR}"/build_"$1"_stripped strip "${ENVOY_DELIVERY_DIR}"/envoy -o "${BASE_TARGET_DIR}"/build_"$1"_stripped/envoy # Copy for azp which doesn't preserve permissions, creating a tar archive tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${BASE_TARGET_DIR}" build_"$1" build_"$1"_stripped # Remove binaries to save space, only if BUILD_REASON exists (running in AZP) [[ -z "${BUILD_REASON}" ]] || \ rm -rf "${BASE_TARGET_DIR}"/build_"$1" "${BASE_TARGET_DIR}"/build_"$1"_stripped "${ENVOY_DELIVERY_DIR}"/envoy{,.dwp} \ bazel-bin/"${ENVOY_BIN}"{,.dwp} } function bazel_binary_build() { BINARY_TYPE="$1" if [[ "${BINARY_TYPE}" == "release" ]]; then COMPILE_TYPE="opt" elif [[ "${BINARY_TYPE}" == "debug" ]]; then COMPILE_TYPE="dbg" elif [[ "${BINARY_TYPE}" == "sizeopt" ]]; then # The COMPILE_TYPE variable is redundant in this case and is only here for # readability. It is already set in the .bazelrc config for sizeopt. COMPILE_TYPE="opt" CONFIG_ARGS="--config=sizeopt" elif [[ "${BINARY_TYPE}" == "fastbuild" ]]; then COMPILE_TYPE="fastbuild" fi echo "Building..." ENVOY_BIN=$(echo "${ENVOY_BUILD_TARGET}" | sed -e 's#^@\([^/]*\)/#external/\1#;s#^//##;s#:#/#') # This is a workaround for https://github.com/bazelbuild/bazel/issues/11834 [[ -n "${ENVOY_RBE}" ]] && rm -rf bazel-bin/"${ENVOY_BIN}"* bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${ENVOY_BUILD_TARGET}" ${CONFIG_ARGS} collect_build_profile "${BINARY_TYPE}"_build # Copy the built envoy binary somewhere that we can access outside of the # container. 
cp_binary_for_outside_access envoy if [[ "${COMPILE_TYPE}" == "dbg" || "${COMPILE_TYPE}" == "opt" ]]; then # Generate dwp file for debugging since we used split DWARF to reduce binary # size bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c "${COMPILE_TYPE}" "${ENVOY_BUILD_DEBUG_INFORMATION}" ${CONFIG_ARGS} # Copy the debug information cp_debug_info_for_outside_access envoy fi cp_binary_for_image_build "${BINARY_TYPE}" "${COMPILE_TYPE}" } function run_process_test_result() { echo "running flaky test reporting script" "${ENVOY_SRCDIR}"/ci/flaky_test/run_process_xml.sh "$CI_TARGET" } CI_TARGET=$1 shift if [[ $# -ge 1 ]]; then COVERAGE_TEST_TARGETS=("$@") TEST_TARGETS=("$@") else # Coverage test will add QUICHE tests by itself. COVERAGE_TEST_TARGETS=("//test/...") TEST_TARGETS=("${COVERAGE_TEST_TARGETS[@]}" "@com_googlesource_quiche//:ci_tests") fi if [[ "$CI_TARGET" == "bazel.release" ]]; then # When testing memory consumption, we want to test against exact byte-counts # where possible. As these differ between platforms and compile options, we # define the 'release' builds as canonical and test them only in CI, so the # toolchain is kept consistent. This ifdef is checked in # test/common/stats/stat_test_utility.cc when computing # Stats::TestUtil::MemoryTest::mode(). [[ "${ENVOY_BUILD_ARCH}" == "x86_64" ]] && BAZEL_BUILD_OPTIONS+=("--test_env=ENVOY_MEMORY_TEST_EXACT=true") setup_clang_toolchain echo "Testing ${TEST_TARGETS[*]} with options: ${BAZEL_BUILD_OPTIONS[*]}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c opt "${TEST_TARGETS[@]}" echo "bazel release build with tests..." bazel_binary_build release exit 0 elif [[ "$CI_TARGET" == "bazel.release.server_only" ]]; then setup_clang_toolchain echo "bazel release build..." bazel_binary_build release exit 0 elif [[ "$CI_TARGET" == "bazel.sizeopt.server_only" ]]; then setup_clang_toolchain echo "bazel size optimized build..." 
bazel_binary_build sizeopt exit 0 elif [[ "$CI_TARGET" == "bazel.sizeopt" ]]; then setup_clang_toolchain echo "Testing ${TEST_TARGETS[*]}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" --config=sizeopt "${TEST_TARGETS[@]}" echo "bazel size optimized build with tests..." bazel_binary_build sizeopt exit 0 elif [[ "$CI_TARGET" == "bazel.gcc" ]]; then BAZEL_BUILD_OPTIONS+=("--test_env=HEAPCHECK=") setup_gcc_toolchain echo "Testing ${TEST_TARGETS[*]}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild "${TEST_TARGETS[@]}" echo "bazel release build with gcc..." bazel_binary_build fastbuild exit 0 elif [[ "$CI_TARGET" == "bazel.debug" ]]; then setup_clang_toolchain echo "Testing ${TEST_TARGETS[*]}" bazel test "${BAZEL_BUILD_OPTIONS[@]}" -c dbg "${TEST_TARGETS[@]}" echo "bazel debug build with tests..." bazel_binary_build debug exit 0 elif [[ "$CI_TARGET" == "bazel.debug.server_only" ]]; then setup_clang_toolchain echo "bazel debug build..." bazel_binary_build debug exit 0 elif [[ "$CI_TARGET" == "bazel.asan" ]]; then setup_clang_toolchain BAZEL_BUILD_OPTIONS+=(-c opt --copt -g "--config=clang-asan" "--build_tests_only") echo "bazel ASAN/UBSAN debug build with tests" echo "Building and testing envoy tests ${TEST_TARGETS[*]}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}" if [ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "1" ]; then echo "Building and testing envoy-filter-example tests..." pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${ENVOY_FILTER_EXAMPLE_TESTS[@]}" popd fi # TODO(mattklein123): This part of the test is now flaky in CI and it's unclear why, possibly # due to sandboxing issue. Debug and enable it again. # if [ "${CI_SKIP_INTEGRATION_TEST_TRAFFIC_TAPPING}" != "1" ] ; then # Also validate that integration test traffic tapping (useful when debugging etc.) # works. This requires that we set TAP_PATH. We do this under bazel.asan to # ensure a debug build in CI. 
# echo "Validating integration test traffic tapping..." # bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" \ # --run_under=@envoy//bazel/test:verify_tap_test.sh \ # //test/extensions/transport_sockets/tls/integration:ssl_integration_test # fi exit 0 elif [[ "$CI_TARGET" == "bazel.tsan" ]]; then setup_clang_toolchain echo "bazel TSAN debug build with tests" echo "Building and testing envoy tests ${TEST_TARGETS[*]}" bazel_with_collection test --config=rbe-toolchain-tsan "${BAZEL_BUILD_OPTIONS[@]}" -c dbg --build_tests_only "${TEST_TARGETS[@]}" if [ "${ENVOY_BUILD_FILTER_EXAMPLE}" == "1" ]; then echo "Building and testing envoy-filter-example tests..." pushd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c dbg --config=clang-tsan "${ENVOY_FILTER_EXAMPLE_TESTS[@]}" popd fi exit 0 elif [[ "$CI_TARGET" == "bazel.msan" ]]; then ENVOY_STDLIB=libc++ setup_clang_toolchain # rbe-toolchain-msan must comes as first to win library link order. BAZEL_BUILD_OPTIONS=("--config=rbe-toolchain-msan" "${BAZEL_BUILD_OPTIONS[@]}" "-c dbg" "--build_tests_only") echo "bazel MSAN debug build with tests" echo "Building and testing envoy tests ${TEST_TARGETS[*]}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS[@]}" exit 0 elif [[ "$CI_TARGET" == "bazel.dev" ]]; then setup_clang_toolchain # This doesn't go into CI but is available for developer convenience. echo "bazel fastbuild build with tests..." echo "Building..." bazel_binary_build fastbuild echo "Building and testing ${TEST_TARGETS[*]}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild "${TEST_TARGETS[@]}" # TODO(foreseeable): consolidate this and the API tool tests in a dedicated target. 
bazel_with_collection //tools/envoy_headersplit:headersplit_test --spawn_strategy=local bazel_with_collection //tools/envoy_headersplit:replace_includes_test --spawn_strategy=local exit 0 elif [[ "$CI_TARGET" == "bazel.compile_time_options" ]]; then # Right now, none of the available compile-time options conflict with each other. If this # changes, this build type may need to be broken up. # TODO(mpwarres): remove quiche=enabled once QUICHE is built by default. COMPILE_TIME_OPTIONS=( "--define" "signal_trace=disabled" "--define" "hot_restart=disabled" "--define" "google_grpc=disabled" "--define" "boringssl=fips" "--define" "log_debug_assert_in_release=enabled" "--define" "quiche=enabled" "--define" "wasm=disabled" "--define" "path_normalization_by_default=true" "--define" "deprecated_features=disabled" "--define" "use_new_codecs_in_integration_tests=true" "--define" "tcmalloc=gperftools" "--define" "zlib=ng") ENVOY_STDLIB="${ENVOY_STDLIB:-libstdc++}" setup_clang_toolchain # This doesn't go into CI but is available for developer convenience. echo "bazel with different compiletime options build with tests..." if [[ "${TEST_TARGETS[*]}" == "//test/..." ]]; then cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" TEST_TARGETS=("@envoy//test/...") fi # Building all the dependencies from scratch to link them against libc++. echo "Building and testing ${TEST_TARGETS[*]}" bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg "${TEST_TARGETS[@]}" --test_tag_filters=-nofips --build_tests_only # Legacy codecs "--define legacy_codecs_in_integration_tests=true" should also be tested in # integration tests with asan. bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg @envoy//test/integration/... --config=clang-asan --build_tests_only # "--define log_debug_assert_in_release=enabled" must be tested with a release build, so run only # these tests under "-c opt" to save time in CI. 
bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c opt @envoy//test/common/common:assert_test @envoy//test/server:server_test echo "Building binary..." bazel build "${BAZEL_BUILD_OPTIONS[@]}" "${COMPILE_TIME_OPTIONS[@]}" -c dbg @envoy//source/exe:envoy-static --build_tag_filters=-nofips collect_build_profile build exit 0 elif [[ "$CI_TARGET" == "bazel.api" ]]; then setup_clang_toolchain echo "Validating API structure..." ./tools/api/validate_structure.py echo "Building API..." bazel build "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//envoy/... echo "Testing API..." bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_api_canonical//test/... @envoy_api_canonical//tools/... \ @envoy_api_canonical//tools:tap2pcap_test echo "Testing API boosting (unit tests)..." bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" -c fastbuild @envoy_dev//clang_tools/api_booster/... echo "Testing API boosting (golden C++ tests)..." # We use custom BAZEL_BUILD_OPTIONS here; the API booster isn't capable of working with libc++ yet. LLVM_CONFIG="${LLVM_ROOT}"/bin/llvm-config BAZEL_BUILD_OPTIONS="--config=clang" python3.8 ./tools/api_boost/api_boost_test.py exit 0 elif [[ "$CI_TARGET" == "bazel.coverage" || "$CI_TARGET" == "bazel.fuzz_coverage" ]]; then setup_clang_toolchain echo "${CI_TARGET} build with tests ${COVERAGE_TEST_TARGETS[*]}" [[ "$CI_TARGET" == "bazel.fuzz_coverage" ]] && export FUZZ_COVERAGE=true # We use custom BAZEL_BUILD_OPTIONS here to cover profiler's code. 
BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]} --define tcmalloc=gperftools" test/run_envoy_bazel_coverage.sh "${COVERAGE_TEST_TARGETS[@]}" collect_build_profile coverage exit 0 elif [[ "$CI_TARGET" == "bazel.clang_tidy" ]]; then # clang-tidy will warn on standard library issues with libc++ ENVOY_STDLIB="libstdc++" setup_clang_toolchain BAZEL_BUILD_OPTIONS="${BAZEL_BUILD_OPTIONS[*]}" NUM_CPUS=$NUM_CPUS ci/run_clang_tidy.sh "$@" exit 0 elif [[ "$CI_TARGET" == "bazel.coverity" ]]; then # Coverity Scan version 2017.07 fails to analyze the entirely of the Envoy # build when compiled with Clang 5. Revisit when Coverity Scan explicitly # supports Clang 5. Until this issue is resolved, run Coverity Scan with # the GCC toolchain. setup_gcc_toolchain echo "bazel Coverity Scan build" echo "Building..." /build/cov-analysis/bin/cov-build --dir "${ENVOY_BUILD_DIR}"/cov-int bazel build --action_env=LD_PRELOAD "${BAZEL_BUILD_OPTIONS[@]}" \ -c opt "${ENVOY_BUILD_TARGET}" # tar up the coverity results tar czvf "${ENVOY_BUILD_DIR}"/envoy-coverity-output.tgz -C "${ENVOY_BUILD_DIR}" cov-int # Copy the Coverity results somewhere that we can access outside of the container. cp -f \ "${ENVOY_BUILD_DIR}"/envoy-coverity-output.tgz \ "${ENVOY_DELIVERY_DIR}"/envoy-coverity-output.tgz exit 0 elif [[ "$CI_TARGET" == "bazel.fuzz" ]]; then setup_clang_toolchain FUZZ_TEST_TARGETS=("$(bazel query "attr('tags','fuzzer',${TEST_TARGETS[*]})")") echo "bazel ASAN libFuzzer build with fuzz tests ${FUZZ_TEST_TARGETS[*]}" echo "Building envoy fuzzers and executing 100 fuzz iterations..." bazel_with_collection test "${BAZEL_BUILD_OPTIONS[@]}" --config=asan-fuzzer "${FUZZ_TEST_TARGETS[@]}" --test_arg="-runs=10" exit 0 elif [[ "$CI_TARGET" == "fix_format" ]]; then # proto_format.sh needs to build protobuf. setup_clang_toolchain echo "fix_format..." 
./tools/code_format/check_format.py fix ./tools/code_format/format_python_tools.sh fix ./tools/proto_format/proto_format.sh fix --test exit 0 elif [[ "$CI_TARGET" == "check_format" ]]; then # proto_format.sh needs to build protobuf. setup_clang_toolchain echo "check_format_test..." ./tools/code_format/check_format_test_helper.sh --log=WARN echo "check_format..." ./tools/code_format/check_shellcheck_format.sh ./tools/code_format/check_format.py check ./tools/code_format/format_python_tools.sh check ./tools/proto_format/proto_format.sh check --test exit 0 elif [[ "$CI_TARGET" == "check_repositories" ]]; then echo "check_repositories..." ./tools/check_repositories.sh exit 0 elif [[ "$CI_TARGET" == "check_spelling" ]]; then echo "check_spelling..." ./tools/spelling/check_spelling.sh check exit 0 elif [[ "$CI_TARGET" == "fix_spelling" ]];then echo "fix_spell..." ./tools/spelling/check_spelling.sh fix exit 0 elif [[ "$CI_TARGET" == "check_spelling_pedantic" ]]; then echo "check_spelling_pedantic..." ./tools/spelling/check_spelling_pedantic.py --mark check exit 0 elif [[ "$CI_TARGET" == "fix_spelling_pedantic" ]]; then echo "fix_spelling_pedantic..." ./tools/spelling/check_spelling_pedantic.py fix exit 0 elif [[ "$CI_TARGET" == "docs" ]]; then echo "generating docs..." # Validate dependency relationships between core/extensions and external deps. tools/dependency/validate_test.py tools/dependency/validate.py # Build docs. docs/build.sh exit 0 elif [[ "$CI_TARGET" == "verify_examples" ]]; then echo "verify examples..." 
docker load < "$ENVOY_DOCKER_BUILD_DIR/docker/envoy-docker-images.tar.xz" _images=$(docker image list --format "{{.Repository}}") while read -r line; do images+=("$line"); done \ <<< "$_images" _tags=$(docker image list --format "{{.Tag}}") while read -r line; do tags+=("$line"); done \ <<< "$_tags" for i in "${!images[@]}"; do if [[ "${images[i]}" =~ "envoy" ]]; then docker tag "${images[$i]}:${tags[$i]}" "${images[$i]}:latest" fi done docker images sudo apt-get update -y sudo apt-get install -y -qq --no-install-recommends redis-tools export DOCKER_NO_PULL=1 umask 027 ci/verify_examples.sh exit 0 else echo "Invalid do_ci.sh target, see ci/README.md for valid targets." exit 1 fi ================================================ FILE: ci/do_circle_ci.sh ================================================ #!/bin/bash set -e # Workaround for argument too long issue in protoc ulimit -s 16384 # bazel uses jgit internally and the default circle-ci .gitconfig says to # convert https://github.com to ssh://git@github.com, which jgit does not support. if [[ -e "${HOME}/.gitconfig" ]]; then mv ~/.gitconfig ~/.gitconfig_save fi # Workaround for not using ci/run_envoy_docker.sh # Create a fake home. Python site libs tries to do getpwuid(3) if we don't and the CI # Docker image gets confused as it has no passwd entry when running non-root # unless we do this. FAKE_HOME=/tmp/fake_home mkdir -p "${FAKE_HOME}" export HOME="${FAKE_HOME}" export PYTHONUSERBASE="${FAKE_HOME}" export USER=bazel ENVOY_SRCDIR="$(pwd)" export ENVOY_SRCDIR # xlarge resource_class. # See note: https://circleci.com/docs/2.0/configuration-reference/#resource_class for why we # hard code this (basically due to how docker works). export NUM_CPUS=6 # CircleCI doesn't support IPv6 by default, so we run all tests with IPv4 only. # IPv6 tests are run with Azure Pipelines. 
export BAZEL_BUILD_EXTRA_OPTIONS+=" \ --test_env=ENVOY_IP_TEST_VERSIONS=v4only \ --local_cpu_resources=${NUM_CPUS} \ --action_env=HOME \ --action_env=PYTHONUSERBASE \ --test_env=HOME \ --test_env=PYTHONUSERBASE" function finish { echo "disk space at end of build:" df -h } trap finish EXIT echo "disk space at beginning of build:" df -h ci/do_ci.sh "$@" ================================================ FILE: ci/do_coverity_local.sh ================================================ #!/bin/bash # # do_coverity_local.sh # # This script builds Envoy with the Coverity Scan Built Tool. # # It expects the following environment variables to be set: # COVERITY_TOKEN - set to the user's Coverity Scan project token. # COVERITY_USER_EMAIL - set to the email address used with the Coverity account. # defaults to the local git config user.email. set -e . ./ci/envoy_build_sha.sh [[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" TEST_TYPE="bazel.coverity" COVERITY_USER_EMAIL="${COVERITY_USER_EMAIL:-$(git config user.email)}" COVERITY_OUTPUT_FILE="${ENVOY_DOCKER_BUILD_DIR}"/envoy/source/exe/envoy-coverity-output.tgz if [ -n "${COVERITY_TOKEN}" ] then pushd "${ENVOY_DOCKER_BUILD_DIR}" rm -rf cov-analysis wget https://scan.coverity.com/download/linux64 --post-data "token=${COVERITY_TOKEN}&project=Envoy+Proxy" -O coverity_tool.tgz tar xvf coverity_tool.tgz mv cov-analysis-linux* cov-analysis popd else echo "ERROR: COVERITY_TOKEN is required to download and run Coverity Scan." exit 1 fi ci/run_envoy_docker.sh "ci/do_ci.sh ${TEST_TYPE}" # Check the artifact size as an approximation for determining if the scan tool was successful. 
if [[ $(find "${COVERITY_OUTPUT_FILE}" -type f -size +256M 2>/dev/null) ]] then echo "Uploading Coverity Scan build" curl \ --form token="${COVERITY_TOKEN}" \ --form email="${COVERITY_USER_EMAIL}" \ --form file=@"${COVERITY_OUTPUT_FILE}" \ --form version="${ENVOY_BUILD_SHA}" \ --form description="Envoy Proxy Build ${ENVOY_BUILD_SHA}" \ https://scan.coverity.com/projects/envoy-proxy else echo "Coverity Scan output file appears to be too small." echo "Not submitting build for analysis." exit 1 fi ================================================ FILE: ci/docker-entrypoint.sh ================================================ #!/usr/bin/env sh set -e loglevel="${loglevel:-}" # if the first argument look like a parameter (i.e. start with '-'), run Envoy if [ "${1#-}" != "$1" ]; then set -- envoy "$@" fi if [ "$1" = 'envoy' ]; then # set the log level if the $loglevel variable is set if [ -n "$loglevel" ]; then set -- "$@" --log-level "$loglevel" fi fi if [ "$ENVOY_UID" != "0" ]; then if [ -n "$ENVOY_UID" ]; then usermod -u "$ENVOY_UID" envoy fi if [ -n "$ENVOY_GID" ]; then groupmod -g "$ENVOY_GID" envoy fi # Ensure the envoy user is able to write to container logs chown envoy:envoy /dev/stdout /dev/stderr su-exec envoy "${@}" else exec "${@}" fi ================================================ FILE: ci/docker_ci.sh ================================================ #!/bin/bash # Do not ever set -x here, it is a security hazard as it will place the credentials below in the # CI logs. 
set -e

ENVOY_DOCKER_IMAGE_DIRECTORY="${ENVOY_DOCKER_IMAGE_DIRECTORY:-${BUILD_STAGINGDIRECTORY:-.}/build_images}"

# Setting environments for buildx tools
config_env() {
  # Qemu configurations
  docker run --rm --privileged multiarch/qemu-user-static --reset -p yes

  # Remove older build instance
  docker buildx rm multi-builder || :
  docker buildx create --use --name multi-builder --platform linux/arm64,linux/amd64
}

# Echo the target platform list for a build type; debug variants are amd64 only.
build_platforms() {
  TYPE=$1
  FILE_SUFFIX="${TYPE/-debug/}"
  if [[ -z "${FILE_SUFFIX}" ]]; then
    echo "linux/arm64,linux/amd64"
  else
    echo "linux/amd64"
  fi
}

# Echo the extra docker build arguments for a build type.
build_args() {
  TYPE=$1
  FILE_SUFFIX="${TYPE/-debug/}"

  printf ' -f ci/Dockerfile-envoy%s' "${FILE_SUFFIX}"
  if [[ "${TYPE}" == *-debug ]]; then
    printf ' --build-arg ENVOY_BINARY_SUFFIX='
  elif [[ "${TYPE}" == "-google-vrp" ]]; then
    printf ' --build-arg ENVOY_VRP_BASE_IMAGE=%s' "${VRP_BASE_IMAGE}"
  fi
}

# The google-vrp build depends on a locally built base image, which only the
# default builder can see.
use_builder() {
  TYPE=$1
  if [[ "${TYPE}" == "-google-vrp" ]]; then
    docker buildx use default
  else
    docker buildx use multi-builder
  fi
}

IMAGES_TO_SAVE=()

build_images() {
  local _args args=()
  TYPE=$1
  BUILD_TAG=$2

  use_builder "${TYPE}"
  _args=$(build_args "${TYPE}")
  read -ra args <<< "$_args"
  PLATFORM="$(build_platforms "${TYPE}")"

  docker buildx build --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" .

  PLATFORM="$(build_platforms "${TYPE}" | tr ',' ' ')"
  # docker buildx load cannot have multiple platform, load individually
  for ARCH in ${PLATFORM}; do
    if [[ "${ARCH}" == "linux/amd64" ]]; then
      IMAGE_TAG="${BUILD_TAG}"
    else
      IMAGE_TAG="${BUILD_TAG}-${ARCH/linux\//}"
    fi
    docker buildx build --platform "${ARCH}" "${args[@]}" -t "${IMAGE_TAG}" . --load
    IMAGES_TO_SAVE+=("${IMAGE_TAG}")
  done
}

push_images() {
  local _args args=()
  TYPE=$1
  BUILD_TAG=$2

  use_builder "${TYPE}"
  _args=$(build_args "${TYPE}")
  read -ra args <<< "$_args"
  PLATFORM="$(build_platforms "${TYPE}")"

  # docker buildx doesn't do push with default builder
  docker buildx build --platform "${PLATFORM}" "${args[@]}" -t "${BUILD_TAG}" . --push || \
    docker push "${BUILD_TAG}"
}

MASTER_BRANCH="refs/heads/master"
RELEASE_BRANCH_REGEX="^refs/heads/release/v.*"
RELEASE_TAG_REGEX="^refs/tags/v.*"

# For master builds and release branch builds use the dev repo. Otherwise we assume it's a tag and
# we push to the primary repo.
if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then
  IMAGE_POSTFIX=""
  IMAGE_NAME="${AZP_BRANCH/refs\/tags\//}"
else
  IMAGE_POSTFIX="-dev"
  IMAGE_NAME="${AZP_SHA1}"
fi

# This prefix is altered for the private security images on setec builds.
DOCKER_IMAGE_PREFIX="${DOCKER_IMAGE_PREFIX:-envoyproxy/envoy}"

# "-google-vrp" must come after "" to ensure we rebuild the local base image dependency.
BUILD_TYPES=("" "-debug" "-alpine" "-alpine-debug" "-google-vrp")

# Configure docker-buildx tools
config_env

# VRP base image is only for amd64
VRP_BASE_IMAGE="${DOCKER_IMAGE_PREFIX}${IMAGE_POSTFIX}:${IMAGE_NAME}"

# Test the docker build in all cases, but use a local tag that we will overwrite before push in the
# cases where we do push.
for BUILD_TYPE in "${BUILD_TYPES[@]}"; do
  build_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}"
done

mkdir -p "${ENVOY_DOCKER_IMAGE_DIRECTORY}"
ENVOY_DOCKER_TAR="${ENVOY_DOCKER_IMAGE_DIRECTORY}/envoy-docker-images.tar.xz"
echo "Saving built images to ${ENVOY_DOCKER_TAR}."
docker save "${IMAGES_TO_SAVE[@]}" | xz -T0 -2 >"${ENVOY_DOCKER_TAR}"

# Only push images for master builds, release branch builds, and tag builds.
if [[ "${AZP_BRANCH}" != "${MASTER_BRANCH}" ]] &&
  ! [[ "${AZP_BRANCH}" =~ ${RELEASE_BRANCH_REGEX} ]] &&
  ! [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then
  echo 'Ignoring non-master branch or tag for docker push.'
  exit 0
fi

docker login -u "$DOCKERHUB_USERNAME" -p "$DOCKERHUB_PASSWORD"

for BUILD_TYPE in "${BUILD_TYPES[@]}"; do
  push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}"

  # Only push latest on master builds.
  if [[ "${AZP_BRANCH}" == "${MASTER_BRANCH}" ]]; then
    docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest"
    push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:latest"
  fi

  # Push vX.Y-latest to tag the latest image in a release line
  if [[ "${AZP_BRANCH}" =~ ${RELEASE_TAG_REGEX} ]]; then
    RELEASE_LINE=$(echo "$IMAGE_NAME" | sed -E 's/(v[0-9]+\.[0-9]+)\.[0-9]+/\1-latest/')
    docker tag "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${IMAGE_NAME}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}"
    push_images "${BUILD_TYPE}" "${DOCKER_IMAGE_PREFIX}${BUILD_TYPE}${IMAGE_POSTFIX}:${RELEASE_LINE}"
  fi
done

================================================
FILE: ci/docker_rebuild_google-vrp.sh
================================================
#!/bin/bash

# Script to rebuild Dockerfile-envoy-google-vrp locally (i.e. not in CI) for development purposes.
# This makes use of the latest envoy-dev base image on Docker Hub as the base and takes an
# optional local path for an Envoy binary. When a custom local Envoy binary is used, the script
# switches to using ${BASE_DOCKER_IMAGE} for the build, which should be configured to provide
# compatibility with your local build environment (specifically glibc).
#
# Usage:
#
# Basic rebuild of Docker image (tagged envoy-google-vrp:local):
#
#   ./ci/docker_rebuild_google-vrp.sh
#
# Basic rebuild of Docker image (tagged envoy-google-vrp:local) with some local Envoy binary:
#
#   bazel build //source/exe:envoy-static --config=libc++ -copt
#   ./ci/docker_rebuild_google-vrp.sh bazel-bin/source/exe/envoy-static

set -e

# Don't use the local envoy-dev, but pull from Docker Hub instead, this avoids having to rebuild
# this local dep which is fairly stable.
BASE_DOCKER_IMAGE="envoyproxy/envoy-dev:latest" BUILD_DIR="$(mktemp -d)" declare -r BUILD_DIR cp ci/Dockerfile-envoy-google-vrp "${BUILD_DIR}" declare -r DOCKER_BUILD_FILE="${BUILD_DIR}"/Dockerfile-envoy-google-vrp # If we have a local Envoy binary, use a variant of the build environment that supports it. if [[ -n "$1" ]]; then # This should match your local machine if you are building custom Envoy binaries outside of Docker. # This provides compatibility of locally built Envoy and glibc in the Docker env. BASE_DOCKER_IMAGE="ubuntu:20.04" # Copy the binary to deal with symlinks in Bazel cache and Docker daemon confusion. declare -r LOCAL_ENVOY="envoy-binary" cp -f "$1" "${PWD}/${LOCAL_ENVOY}" sed -i -e "s@# ADD %local envoy bin%@ADD ${LOCAL_ENVOY}@" "${DOCKER_BUILD_FILE}" fi cat "${DOCKER_BUILD_FILE}" docker build -t "envoy-google-vrp:local" --build-arg "ENVOY_VRP_BASE_IMAGE=${BASE_DOCKER_IMAGE}" -f "${DOCKER_BUILD_FILE}" . if [[ -n "$1" ]]; then rm -f "${LOCAL_ENVOY}" fi rm -r "${BUILD_DIR}" ================================================ FILE: ci/envoy_build_sha.sh ================================================ #!/bin/bash ENVOY_BUILD_SHA=$(grep envoyproxy/envoy-build-ubuntu "$(dirname "$0")"/../.bazelrc | sed -e 's#.*envoyproxy/envoy-build-ubuntu:\(.*\)#\1#' | uniq) [[ $(wc -l <<< "${ENVOY_BUILD_SHA}" | awk '{$1=$1};1') == 1 ]] || (echo ".bazelrc envoyproxy/envoy-build-ubuntu hashes are inconsistent!" && exit 1) ================================================ FILE: ci/filter_example_mirror.sh ================================================ #!/bin/bash set -e ENVOY_SRCDIR=$(cd "$(dirname "${BASH_SOURCE[0]}")/../" && pwd) CHECKOUT_DIR=../envoy-filter-example if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] then echo "Cloning..." 
git clone git@github.com:envoyproxy/envoy-filter-example "$CHECKOUT_DIR" git -C "$CHECKOUT_DIR" config user.name "envoy-filter-example(CircleCI)" git -C "$CHECKOUT_DIR" config user.email envoy-filter-example@users.noreply.github.com git -C "$CHECKOUT_DIR" fetch git -C "$CHECKOUT_DIR" checkout -B master origin/master echo "Updating Submodule..." # Update submodule to latest Envoy SHA ENVOY_SHA=$(git rev-parse HEAD) git -C "$CHECKOUT_DIR" submodule update --init git -C "$CHECKOUT_DIR/envoy" checkout "$ENVOY_SHA" echo "Updating Workspace file." sed -e "s|{ENVOY_SRCDIR}|envoy|" "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter.example > "${CHECKOUT_DIR}"/WORKSPACE echo "Committing, and Pushing..." git -C "$CHECKOUT_DIR" commit -a -m "Update Envoy submodule to $ENVOY_SHA" git -C "$CHECKOUT_DIR" push origin master echo "Done" fi ================================================ FILE: ci/filter_example_setup.sh ================================================ #!/bin/bash # Configure environment for Envoy Filter Example build and test. set -e # This is the hash on https://github.com/envoyproxy/envoy-filter-example.git we pin to. ENVOY_FILTER_EXAMPLE_GITSHA="493e2e5bee10bbed1c3c097e09d83d7f672a9f2e" ENVOY_FILTER_EXAMPLE_SRCDIR="${BUILD_DIR}/envoy-filter-example" # shellcheck disable=SC2034 ENVOY_FILTER_EXAMPLE_TESTS=( "//:echo2_integration_test" "//http-filter-example:http_filter_integration_test" "//:envoy_binary_test") if [[ ! 
-d "${ENVOY_FILTER_EXAMPLE_SRCDIR}/.git" ]]; then rm -rf "${ENVOY_FILTER_EXAMPLE_SRCDIR}" git clone https://github.com/envoyproxy/envoy-filter-example.git "${ENVOY_FILTER_EXAMPLE_SRCDIR}" fi (cd "${ENVOY_FILTER_EXAMPLE_SRCDIR}" && git fetch origin && git checkout -f "${ENVOY_FILTER_EXAMPLE_GITSHA}") sed -e "s|{ENVOY_SRCDIR}|${ENVOY_SRCDIR}|" "${ENVOY_SRCDIR}"/ci/WORKSPACE.filter.example > "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/WORKSPACE mkdir -p "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel ln -sf "${ENVOY_SRCDIR}"/bazel/get_workspace_status "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/bazel/ cp -f "${ENVOY_SRCDIR}"/.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ cp -f "$(bazel info workspace)"/*.bazelrc "${ENVOY_FILTER_EXAMPLE_SRCDIR}"/ export FILTER_WORKSPACE_SET=1 ================================================ FILE: ci/flaky_test/process_xml.py ================================================ #!/usr/bin/env python3 import subprocess import os import xml.etree.ElementTree as ET import slack import sys # Check if a test suite reports failure. def checkTestStatus(file): tree = ET.parse(file) root = tree.getroot() for testsuite in root: if (testsuite.attrib['failures'] != '0'): return False return True def parseXML(file, visited): log_file = file.split('.') log_file_path = "" # This is dependent on the fact that log files reside in the same directory # as their corresponding xml files. for token in log_file[:-1]: log_file_path += token log_file_path += ".log" tree = ET.parse(file) root = tree.getroot() ret = "" # This loop is dependent on the structure of xml file emitted for test runs. # Should this change in the future, appropriate adjustments need to made. 
for testsuite in root: if (testsuite.attrib['failures'] != '0'): for testcase in testsuite: for failure_msg in testcase: if (testcase.attrib['name'], testsuite.attrib['name']) not in visited: ret += "-----------------------Flaky Testcase: {} in TestSuite: {} -----------------------\n".format( testcase.attrib['name'], testsuite.attrib['name']) ret += log_file_path + "\n" + failure_msg.text + "\n" visited.add((testcase.attrib['name'], testsuite.attrib['name'])) return ret # The following function links the filepath of 'test.xml' (the result for the last attempt) with # that of its 'attmpt_n.xml' file and stores it in a dictionary for easy lookup. def processFindOutput(f, problematic_tests): for line in f: lineList = line.split('/') filepath = "" for i in range(len(lineList)): if i >= len(lineList) - 2: break filepath += lineList[i] + "/" filepath += "test.xml" problematic_tests[filepath] = line.strip('\n') # Prints out helpful information on the run using Git. # Should Git changes the output of the used commands in the future, # this will likely need adjustments as well. 
def getGitInfo(CI_TARGET): ret = "" os.system("git remote -v > ${TMP_OUTPUT_PROCESS_XML}") os.system("git describe --all >> ${TMP_OUTPUT_PROCESS_XML}") os.system("git show >> ${TMP_OUTPUT_PROCESS_XML}") f = open(os.environ['TMP_OUTPUT_PROCESS_XML'], 'r+', encoding='utf-8') # Fetching the URL from predefined env variable envoy_link = os.environ["REPO_URI"] for line in [next(f) for x in range(6)]: if line.split('/')[0] == 'remotes': for token in line.split('/')[1:-1]: envoy_link += '/' + token ret += line ret += "link for additional content: " + envoy_link + " \n" ret += "azure build URI: " + os.environ["BUILD_URI"] + " \n" if CI_TARGET != "": ret += "In " + CI_TARGET + " build\n" return ret if __name__ == "__main__": CI_TARGET = "" if len(sys.argv) == 2: CI_TARGET = sys.argv[1] output_msg = "``` \n" has_flaky_test = False if os.getenv("TEST_TMPDIR") and os.getenv("REPO_URI") and os.getenv("BUILD_URI"): os.environ["TMP_OUTPUT_PROCESS_XML"] = os.getenv("TEST_TMPDIR") + "/tmp_output_process_xml.txt" else: print("set the env variables first") sys.exit(0) output_msg += getGitInfo(CI_TARGET) if CI_TARGET == "MacOS": os.system('find ${TEST_TMPDIR}/ -name "attempt_*.xml" > ${TMP_OUTPUT_PROCESS_XML}') else: os.system( 'find ${TEST_TMPDIR}/**/**/**/**/bazel-testlogs/ -name "attempt_*.xml" > ${TMP_OUTPUT_PROCESS_XML}' ) f = open(os.environ['TMP_OUTPUT_PROCESS_XML'], 'r+') if f.closed: print("cannot open {}".format(os.environ['TMP_OUTPUT_PROCESS_XML'])) # All output of find command should be either failed or flaky tests, as only then will # a test be rerun and have an 'attempt_n.xml' file. problematic_tests holds a lookup # table between the last_attempt xml filepath and the failed previous attempt filepath. problematic_tests = {} processFindOutput(f, problematic_tests) # Needed to make sure no duplicate flaky tests are going to be reported. 
visited = set() # The logic here goes as follows: If there is a test suite that has run multiple times, # which produces attempt_*.xml files, it means that the end result of that test # is either flaky or failed. So if we find that the last run of the test succeeds # we know for sure that this is a flaky test. for k in problematic_tests.keys(): if checkTestStatus(k): has_flaky_test = True output_msg += parseXML(problematic_tests[k], visited) output_msg += "``` \n" if has_flaky_test: if os.getenv("SLACK_TOKEN"): SLACKTOKEN = os.environ["SLACK_TOKEN"] client = slack.WebClient(SLACKTOKEN) client.chat_postMessage(channel='test-flaky', text=output_msg, as_user="true") else: print(output_msg) os.remove(os.environ["TMP_OUTPUT_PROCESS_XML"]) ================================================ FILE: ci/flaky_test/requirements.txt ================================================ aiohttp==3.6.2 \ --hash=sha256:1e984191d1ec186881ffaed4581092ba04f7c61582a177b187d3a2f07ed9719e \ --hash=sha256:259ab809ff0727d0e834ac5e8a283dc5e3e0ecc30c4d80b3cd17a4139ce1f326 \ --hash=sha256:2f4d1a4fdce595c947162333353d4a44952a724fba9ca3205a3df99a33d1307a \ --hash=sha256:32e5f3b7e511aa850829fbe5aa32eb455e5534eaa4b1ce93231d00e2f76e5654 \ --hash=sha256:344c780466b73095a72c616fac5ea9c4665add7fc129f285fbdbca3cccf4612a \ --hash=sha256:460bd4237d2dbecc3b5ed57e122992f60188afe46e7319116da5eb8a9dfedba4 \ --hash=sha256:4c6efd824d44ae697814a2a85604d8e992b875462c6655da161ff18fd4f29f17 \ --hash=sha256:50aaad128e6ac62e7bf7bd1f0c0a24bc968a0c0590a726d5a955af193544bcec \ --hash=sha256:6206a135d072f88da3e71cc501c59d5abffa9d0bb43269a6dcd28d66bfafdbdd \ --hash=sha256:65f31b622af739a802ca6fd1a3076fd0ae523f8485c52924a89561ba10c49b48 \ --hash=sha256:ae55bac364c405caa23a4f2d6cfecc6a0daada500274ffca4a9230e7129eac59 \ --hash=sha256:b778ce0c909a2653741cb4b1ac7015b5c130ab9c897611df43ae6a58523cb965 async-timeout==3.0.1 \ --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \ 
--hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3 attrs==20.2.0 \ --hash=sha256:26b54ddbbb9ee1d34d5d3668dd37d6cf74990ab23c828c2888dccdceee395594 \ --hash=sha256:fce7fc47dfc976152e82d53ff92fa0407700c21acd20886a13777a0d20e655dc chardet==3.0.4 \ --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \ --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 idna_ssl==1.1.0 \ --hash=sha256:a933e3bb13da54383f9e8f35dc4f9cb9eb9b3b78c6b36f311254d6d0d92c6c7c multidict==4.7.6 \ --hash=sha256:1ece5a3369835c20ed57adadc663400b5525904e53bae59ec854a5d36b39b21a \ --hash=sha256:275ca32383bc5d1894b6975bb4ca6a7ff16ab76fa622967625baeebcf8079000 \ --hash=sha256:3750f2205b800aac4bb03b5ae48025a64e474d2c6cc79547988ba1d4122a09e2 \ --hash=sha256:4538273208e7294b2659b1602490f4ed3ab1c8cf9dbdd817e0e9db8e64be2507 \ --hash=sha256:5141c13374e6b25fe6bf092052ab55c0c03d21bd66c94a0e3ae371d3e4d865a5 \ --hash=sha256:51a4d210404ac61d32dada00a50ea7ba412e6ea945bbe992e4d7a595276d2ec7 \ --hash=sha256:5cf311a0f5ef80fe73e4f4c0f0998ec08f954a6ec72b746f3c179e37de1d210d \ --hash=sha256:6513728873f4326999429a8b00fc7ceddb2509b01d5fd3f3be7881a257b8d463 \ --hash=sha256:7388d2ef3c55a8ba80da62ecfafa06a1c097c18032a501ffd4cabbc52d7f2b19 \ --hash=sha256:9456e90649005ad40558f4cf51dbb842e32807df75146c6d940b6f5abb4a78f3 \ --hash=sha256:c026fe9a05130e44157b98fea3ab12969e5b60691a276150db9eda71710cd10b \ --hash=sha256:d14842362ed4cf63751648e7672f7174c9818459d169231d03c56e84daf90b7c \ --hash=sha256:e0d072ae0f2a179c375f67e3da300b47e1a83293c554450b29c900e50afaae87 \ --hash=sha256:f07acae137b71af3bb548bd8da720956a3bc9f9a0b87733e0899226a2317aeb7 \ --hash=sha256:fbb77a75e529021e7c4a8d4e823d88ef4d23674a202be4f5addffc72cbb91430 \ 
--hash=sha256:fcfbb44c59af3f8ea984de67ec7c306f618a3ec771c2843804069917a8f2e255 \ --hash=sha256:feed85993dbdb1dbc29102f50bca65bdc68f2c0c8d352468c25b54874f23c39d slackclient==2.9.1 \ --hash=sha256:214edd4a494cc74353c8084ec184ff97a116d4b12cde287f805a9af948ef39ae \ --hash=sha256:3a3e84fd4f13d9715740c13ce6c3c25b970147aeeeec22ef137d796124dfcf08 typing-extensions==3.7.4.3 \ --hash=sha256:7cb407020f00f7bfc3cb3e7881628838e69d8f3fcab2f64742a5e76b2f841918 \ --hash=sha256:99d4073b617d30288f569d3f13d2bd7548c3a7e4c8de87db09a9d29bb3a4a60c \ --hash=sha256:dafc7639cde7f1b6e1acc0f457842a83e722ccca8eef5270af2d74792619a89f wheel==0.35.1 \ --hash=sha256:497add53525d16c173c2c1c733b8f655510e909ea78cc0e29d374243544b77a2 \ --hash=sha256:99a22d87add3f634ff917310a3d87e499f19e663413a52eb9232c447aa646c9f yarl==1.6.0 \ --hash=sha256:04a54f126a0732af75e5edc9addeaa2113e2ca7c6fce8974a63549a70a25e50e \ --hash=sha256:3cc860d72ed989f3b1f3abbd6ecf38e412de722fb38b8f1b1a086315cf0d69c5 \ --hash=sha256:5d84cc36981eb5a8533be79d6c43454c8e6a39ee3118ceaadbd3c029ab2ee580 \ --hash=sha256:5e447e7f3780f44f890360ea973418025e8c0cdcd7d6a1b221d952600fd945dc \ --hash=sha256:61d3ea3c175fe45f1498af868879c6ffeb989d4143ac542163c45538ba5ec21b \ --hash=sha256:67c5ea0970da882eaf9efcf65b66792557c526f8e55f752194eff8ec722c75c2 \ --hash=sha256:6f6898429ec3c4cfbef12907047136fd7b9e81a6ee9f105b45505e633427330a \ --hash=sha256:7ce35944e8e61927a8f4eb78f5bc5d1e6da6d40eadd77e3f79d4e9399e263921 \ --hash=sha256:b7c199d2cbaf892ba0f91ed36d12ff41ecd0dde46cbf64ff4bfe997a3ebc925e \ --hash=sha256:c15d71a640fb1f8e98a1423f9c64d7f1f6a3a168f803042eaf3a5b5022fde0c1 \ --hash=sha256:c22607421f49c0cb6ff3ed593a49b6a99c6ffdeaaa6c944cdda83c2393c8864d \ --hash=sha256:c604998ab8115db802cc55cb1b91619b2831a6128a62ca7eea577fc8ea4d3131 \ --hash=sha256:d088ea9319e49273f25b1c96a3763bf19a882cff774d1792ae6fba34bd40550a \ --hash=sha256:db9eb8307219d7e09b33bcb43287222ef35cbcf1586ba9472b0a4b833666ada1 \ 
--hash=sha256:e31fef4e7b68184545c3d68baec7074532e077bd1906b040ecfba659737df188 \ --hash=sha256:e32f0fb443afcfe7f01f95172b66f279938fbc6bdaebe294b0ff6747fb6db020 \ --hash=sha256:fcbe419805c9b20db9a51d33b942feddbf6e7fb468cb20686fd7089d4164c12a ================================================ FILE: ci/flaky_test/run_process_xml.sh ================================================ #!/bin/bash # shellcheck source=tools/shell_utils.sh . "${ENVOY_SRCDIR}"/tools/shell_utils.sh if [[ "${ENVOY_BUILD_ARCH}" == "aarch64" ]]; then export MULTIDICT_NO_EXTENSIONS=1 export YARL_NO_EXTENSIONS=1 fi python_venv process_xml "$1" ================================================ FILE: ci/flaky_test/run_process_xml_mac.sh ================================================ #!/bin/bash pip3 install slackclient ./ci/flaky_test/process_xml.py ================================================ FILE: ci/go_mirror.sh ================================================ #!/bin/bash set -e if [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] then tools/api/generate_go_protobuf.py fi ================================================ FILE: ci/mac_ci_setup.sh ================================================ #!/bin/bash # Installs the dependencies required for a macOS build via homebrew. # Tools are not upgraded to new versions. # See: # https://github.com/actions/virtual-environments/blob/master/images/macos/macos-10.15-Readme.md for # a list of pre-installed tools in the macOS image. export HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_RETRY_ATTEMPTS=10 HOMEBREW_RETRY_INTERVAL=1 function is_installed { brew ls --versions "$1" >/dev/null } function install { echo "Installing $1" if ! brew install "$1"; then echo "Failed to install $1" exit 1 fi } function retry () { local returns=1 i=1 while ((i<=HOMEBREW_RETRY_ATTEMPTS)); do if "$@"; then returns=0 break else sleep "$HOMEBREW_RETRY_INTERVAL"; ((i++)) fi done return "$returns" } if ! 
retry brew update; then echo "Failed to update homebrew" exit 1 fi DEPS="automake cmake coreutils go libtool wget ninja" for DEP in ${DEPS} do is_installed "${DEP}" || install "${DEP}" done if [ -n "$CIRCLECI" ]; then # bazel uses jgit internally and the default circle-ci .gitconfig says to # convert https://github.com to ssh://git@github.com, which jgit does not support. mv ~/.gitconfig ~/.gitconfig_save fi # Required as bazel and a foreign bazelisk are installed in the latest macos vm image, we have # to unlink/overwrite them to install bazelisk echo "Installing bazelisk" brew reinstall --force bazelisk if ! brew link --overwrite bazelisk; then echo "Failed to install and link bazelisk" exit 1 fi bazel version pip3 install slackclient ================================================ FILE: ci/mac_ci_steps.sh ================================================ #!/bin/bash set -e function finish { echo "disk space at end of build:" df -h } trap finish EXIT echo "disk space at beginning of build:" df -h # shellcheck source=ci/setup_cache.sh . "$(dirname "$0")"/setup_cache.sh read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:-}" read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}" # TODO(zuercher): remove --flaky_test_attempts when https://github.com/envoyproxy/envoy/issues/2428 # is resolved. BAZEL_BUILD_OPTIONS=( "--curses=no" --show_task_finish --verbose_failures "--action_env=PATH=/usr/local/bin:/opt/local/bin:/usr/bin:/bin" "--test_output=all" "--flaky_test_attempts=integration@2" "${BAZEL_BUILD_EXTRA_OPTIONS[@]}" "${BAZEL_EXTRA_TEST_OPTIONS[@]}") # Build envoy and run tests as separate steps so that failure output # is somewhat more deterministic (rather than interleaving the build # and test steps). if [[ $# -gt 0 ]]; then TEST_TARGETS=$* else TEST_TARGETS='//test/integration/...' fi if [[ "$TEST_TARGETS" == "//test/..." || "$TEST_TARGETS" == "//test/integration/..." 
]]; then bazel build "${BAZEL_BUILD_OPTIONS[@]}" //source/exe:envoy-static fi bazel test "${BAZEL_BUILD_OPTIONS[@]}" "${TEST_TARGETS}" # Additionally run macOS specific test suites bazel test "${BAZEL_BUILD_OPTIONS[@]}" //test/common/network:apple_dns_impl_test ================================================ FILE: ci/repokitteh/modules/azure_pipelines.star ================================================ load("github.com/repokitteh/modules/lib/utils.star", "react") _azp_context_prefix = "ci/azp: " def _retry_azp(organization, project, build_id, token): """Makes an Azure Pipelines Build API request with retry""" url = "https://dev.azure.com/{organization}/{project}/_apis/build/builds/{buildId}?retry=true&api-version=5.1".format(organization = organization, project = project, buildId = build_id) return http(url = url, method = "PATCH", headers = { "authorization": "Basic " + token, "content-type": "application/json;odata=verbose", }) def _get_azp_checks(): github_checks = github.check_list_runs()["check_runs"] check_ids = [] checks = [] for check in github_checks: # Filter out job level GitHub check, which is not individually retriable. 
        # Keep only unique, retriable Azure Pipelines stage-level checks.
        if check["app"]["slug"] == "azure-pipelines" and "jobId" not in check["details_url"] and check["external_id"] not in check_ids:
            check_ids.append(check["external_id"])
            checks.append(check)

    return checks

# "/retry-azp" command handler: re-queue every completed-and-failed AZP check.
def _retry(config, comment_id, command):
    msgs = "Retrying Azure Pipelines, to retry CircleCI checks, use `/retest-circle`.\n"
    checks = _get_azp_checks()

    retried_checks = []
    for check in checks:
        name_with_link = "[{}]({})".format(check["name"], check["details_url"])
        if check["status"] != "completed":
            msgs += "Cannot retry non-completed check: {}, please wait.\n".format(name_with_link)
        elif check["conclusion"] != "failure":
            msgs += "Check {} didn't fail.\n".format(name_with_link)
        else:
            # external_id is encoded as "<unused>|<build_id>|<project>".
            _, build_id, project = check["external_id"].split("|")
            _retry_azp("cncf", project, build_id, config["token"])
            retried_checks.append(name_with_link)

    if len(retried_checks) == 0:
        react(comment_id, msgs)
    else:
        react(comment_id, None)
        msgs += "Retried failed jobs in: {}".format(", ".join(retried_checks))
        github.issue_create_comment(msgs)

handlers.command(name = "retry-azp", func = _retry)

================================================
FILE: ci/repokitteh/modules/ownerscheck.star
================================================
# Ownership specified by list of specs, like so:
#
# use(
#   "github.com/repokitteh/modules/ownerscheck.star",
#   paths=[
#     {
#       "owner": "envoyproxy/api-shepherds!",
#       "path": "api/",
#       "label": "api",
#       "allow_global_approval": True,
#       "github_status_label" = "any API change",
#     },
#   ],
# )
#
# This module will maintain a commit status per specified path regex (also aka as spec).
#
# Two types of approvals:
# 1. Global approvals, done by approving the PR using Github's review approval feature.
# 2. Partial approval, done by commenting "/lgtm [label]" where label is the label
#    associated with the path. This does not affect GitHub's PR approve status, only
#    this module's maintained commit status. This approval is automatically revoked
#    if any further changes are done to the relevant files in this spec.
#
# By default, 'allow_global_approval' is true and either (1) or (2) above can unblock
# merges. If 'allow_global_approval' is set false, then only (2) will unblock a merge.
#
# 'label' refers to a GitHub label applied to any matching PR. The GitHub check status
# can be customized with `github_status_label`.

load("text", "match")
load("github.com/repokitteh/modules/lib/utils.star", "react")

# Record the per-file SHAs that `who` has approved, keyed by approver and filename.
def _store_partial_approval(who, files):
    for f in files:
        store_put('ownerscheck/partial/%s:%s' % (who, f['filename']), f['sha'])

# A partial approval only holds while every relevant file's SHA matches what was stored.
def _is_partially_approved(who, files):
    for f in files:
        sha = store_get('ownerscheck/partial/%s:%s' % (who, f['filename']))
        if sha != f['sha']:
            return False
    return True

# Filter configured specs down to those whose path regex matches a changed file.
def _get_relevant_specs(specs, changed_files):
    if not specs:
        print("no specs")
        return []
    relevant = []
    for spec in specs:
        path_match = spec["path"]
        files = [f for f in changed_files if match(path_match, f['filename'])]
        allow_global_approval = spec.get("allow_global_approval", True)
        status_label = spec.get("github_status_label", "")
        if files:
            relevant.append(struct(files=files,
                                   owner=spec["owner"],
                                   label=spec.get("label", None),
                                   path_match=path_match,
                                   allow_global_approval=allow_global_approval,
                                   status_label=status_label))
    print("specs: %s" % relevant)
    return relevant

def _get_global_approvers():  # -> List[str] (owners)
    reviews = [{'login': r['user']['login'], 'state': r['state']} for r in github.pr_list_reviews()]
    print("reviews=%s" % reviews)
    return [r['login'] for r in reviews if r['state'] == 'APPROVED']

# A spec is approved if any required owner gave a global approval (when allowed)
# or a still-valid partial approval.
def _is_approved(spec, approvers):
    owner = spec.owner
    # A trailing '!' marks a blocking owner; strip it before lookups.
    if owner[-1] == '!':
        owner = owner[:-1]
    required = [owner]
    if '/' in owner:
        team_name = owner.split('/')[1]
        # this is a team, parse it.
        team_id = github.team_get_by_name(team_name)['id']
        required = [m['login'] for m in github.team_list_members(team_id)]
        print("team %s(%d) = %s" % (team_name, team_id, required))
    for r in required:
        if spec.allow_global_approval and any([a for a in approvers if a == r]):
            print("global approver: %s" % r)
            return True
        if _is_partially_approved(r, spec.files):
            print("partial approval: %s" % r)
            return True
    return False

# Publish the commit status for a blocking spec.
def _update_status(owner, status_label, path_match, approved):
    changes_to = path_match or '/'
    github.create_status(
        state=approved and 'success' or 'pending',
        context='%s must approve for %s' % (owner, status_label),
        description='changes to %s' % changes_to,
    )

def _get_specs(config):
    return _get_relevant_specs(config.get('paths', []), github.pr_list_files())

# Recompute approval state for all (or the given) specs, updating statuses and labels.
def _reconcile(config, specs=None):
    specs = specs or _get_specs(config)
    if not specs:
        return []
    approvers = _get_global_approvers()
    print("approvers: %s" % approvers)
    results = []
    for spec in specs:
        approved = _is_approved(spec, approvers)
        print("%s -> %s" % (spec, approved))
        results.append((spec, approved))
        if spec.owner[-1] == '!':
            _update_status(spec.owner[:-1], spec.status_label, spec.path_match, approved)
            if spec.label:
                if approved:
                    github.issue_unlabel(spec.label)
                else:
                    github.issue_label(spec.label)
        elif spec.label:  # fyis
            github.issue_label(spec.label)
    return results

# Post a single comment CCing owners of all still-unapproved specs.
def _comment(config, results, force=False):
    lines = []
    for spec, approved in results:
        if approved:
            continue
        mention = spec.owner
        if mention[0] != '@':
            mention = '@' + mention
        if mention[-1] == '!':
            mention = mention[:-1]
        match_description = spec.path_match
        if match_description:
            match_description = ' for changes made to `' + match_description + '`'
        mode = spec.owner[-1] == '!' and 'approval' or 'fyi'
        key = "ownerscheck/%s/%s" % (spec.owner, spec.path_match)
        # Skip re-commenting when the stored mode is unchanged, unless forced.
        if (not force) and (store_get(key) == mode):
            mode = 'skip'
        else:
            store_put(key, mode)
        if mode == 'approval':
            lines.append('CC %s: Your approval is needed%s.' % (mention, match_description))
        elif mode == 'fyi':
            lines.append('CC %s: FYI only%s.' % (mention, match_description))
    if lines:
        github.issue_create_comment('\n'.join(lines))

def _reconcile_and_comment(config):
    _comment(config, _reconcile(config))

def _force_reconcile_and_comment(config):
    _comment(config, _reconcile(config), force=True)

def _pr(action, config):
    if action in ['synchronize', 'opened']:
        _reconcile_and_comment(config)

def _pr_review(action, review_state, config):
    if action != 'submitted' or not review_state:
        return
    _reconcile(config)

# Partial approvals are done by commenting "/lgtm [label]".
def _lgtm_by_comment(config, comment_id, command, sender, sha):
    labels = command.args
    if len(labels) != 1:
        react(comment_id, 'please specify a single label can be specified')
        return
    label = labels[0]
    specs = [s for s in _get_specs(config) if s.label and s.label == label]
    if len(specs) == 0:
        react(comment_id, 'no relevant owners for "%s"' % label)
        return
    for spec in specs:
        _store_partial_approval(sender, spec.files)
    react(comment_id, None)
    _reconcile(config, specs)

handlers.pull_request(func=_pr)
handlers.pull_request_review(func=_pr_review)
handlers.command(name='checkowners', func=_reconcile)
handlers.command(name='checkowners!', func=_force_reconcile_and_comment)
handlers.command(name='lgtm', func=_lgtm_by_comment)

================================================
FILE: ci/run_clang_tidy.sh
================================================
#!/bin/bash

set -eo pipefail

# ENVOY_SRCDIR should point to where Envoy source lives, while SRCDIR could be a downstream build
# (for example envoy-filter-example).
# Default ENVOY_SRCDIR/SRCDIR to the current working directory when unset.
[[ -z "${ENVOY_SRCDIR}" ]] && ENVOY_SRCDIR="${PWD}"
[[ -z "${SRCDIR}" ]] && SRCDIR="${ENVOY_SRCDIR}"

# Locate the LLVM toolchain via llvm-config; each tool can be overridden
# individually through its corresponding environment variable.
export LLVM_CONFIG=${LLVM_CONFIG:-llvm-config}
LLVM_PREFIX=${LLVM_PREFIX:-$(${LLVM_CONFIG} --prefix)}
CLANG_TIDY=${CLANG_TIDY:-$(${LLVM_CONFIG} --bindir)/clang-tidy}
CLANG_APPLY_REPLACEMENTS=${CLANG_APPLY_REPLACEMENTS:-$(${LLVM_CONFIG} --bindir)/clang-apply-replacements}
# File into which clang-tidy exports its suggested fixes; a non-empty file at
# the end of the run marks the check as failed (see the tail of this script).
FIX_YAML=clang-tidy-fixes.yaml

# Quick syntax check of .clang-tidy.
${CLANG_TIDY} -dump-config > /dev/null 2> clang-tidy-config-errors.txt
if [[ -s clang-tidy-config-errors.txt ]]; then
  cat clang-tidy-config-errors.txt
  rm clang-tidy-config-errors.txt
  exit 1
fi
rm clang-tidy-config-errors.txt

echo "Generating compilation database..."

# bazel build need to be run to setup virtual includes, generating files which are consumed
# by clang-tidy
"${ENVOY_SRCDIR}/tools/gen_compilation_database.py" --include_headers

# The exclude_* helpers below are grep filters reading a path list on stdin;
# they are chained together by filter_excludes further down.

# Do not run clang-tidy against win32 impl
# TODO(scw00): We should run clang-tidy against win32 impl once we have clang-cl support for Windows
function exclude_win32_impl() {
  grep -v source/common/filesystem/win32/ | grep -v source/common/common/win32 | grep -v source/exe/win32 | grep -v source/common/api/win32
}

# Do not run clang-tidy against macOS impl
# TODO: We should run clang-tidy against macOS impl for completeness
function exclude_macos_impl() {
  grep -v source/common/filesystem/kqueue/ | grep -v source/common/network/apple_dns_impl | grep -v test/common/network/apple_dns_impl_test
}

# Do not run incremental clang-tidy on check_format testdata files.
function exclude_check_format_testdata() {
  grep -v tools/testdata/check_format/
}

# Do not run clang-tidy on envoy_headersplit testdata files.
function exclude_headersplit_testdata() {
  grep -v tools/envoy_headersplit/
}

# Do not run clang-tidy against Chromium URL import, this needs to largely
# reflect the upstream structure.
function exclude_chromium_url() {
  grep -v source/common/chromium_url/
}

# Exclude files in third_party which are temporary forks from other OSS projects.
function exclude_third_party() {
  grep -v third_party/
}

# Exclude files which are part of the Wasm emscripten environment
function exclude_wasm_emscripten() {
  grep -v source/extensions/common/wasm/ext
}

# Exclude files which are part of the Wasm SDK
function exclude_wasm_sdk() {
  grep -v proxy_wasm_cpp_sdk
}

# Exclude files which are part of the Wasm Host environment
function exclude_wasm_host() {
  grep -v proxy_wasm_cpp_host
}

# Exclude proxy-wasm test_data.
function exclude_wasm_test_data() {
  grep -v wasm/test_data
}

# Chain all of the exclude_* grep filters: reads a path list (or diff) on
# stdin and writes the filtered result to stdout.
function filter_excludes() {
  exclude_check_format_testdata | exclude_headersplit_testdata | exclude_chromium_url | exclude_win32_impl | exclude_macos_impl | exclude_third_party | exclude_wasm_emscripten | exclude_wasm_sdk | exclude_wasm_host | exclude_wasm_test_data
}

# Run LLVM's run-clang-tidy.py over the compilation database in SRCDIR,
# exporting fixes to FIX_YAML and applying them in-place when
# APPLY_CLANG_TIDY_FIXES is set. Extra arguments are forwarded as file filters.
function run_clang_tidy() {
  python3 "${LLVM_PREFIX}/share/clang/run-clang-tidy.py" \
    -clang-tidy-binary="${CLANG_TIDY}" \
    -clang-apply-replacements-binary="${CLANG_APPLY_REPLACEMENTS}" \
    -export-fixes=${FIX_YAML} -j "${NUM_CPUS:-0}" -p "${SRCDIR}" -quiet \
    ${APPLY_CLANG_TIDY_FIXES:+-fix} "$@"
}

# Run clang-tidy only on the lines changed relative to the given git ref,
# after dropping excluded paths from the diff.
function run_clang_tidy_diff() {
  git diff "$1" | filter_excludes | \
    python3 "${LLVM_PREFIX}/share/clang/clang-tidy-diff.py" \
      -clang-tidy-binary="${CLANG_TIDY}" \
      -export-fixes="${FIX_YAML}" -j "${NUM_CPUS:-0}" -p 1 -quiet
}

# Dispatch, in priority order: explicit file arguments; a full run when
# RUN_FULL_CLANG_TIDY=1; otherwise an incremental run against DIFF_REF
# (derived from Azure Pipelines variables when not supplied).
if [[ $# -gt 0 ]]; then
  echo "Running clang-tidy on: $*"
  run_clang_tidy "$@"
elif [[ "${RUN_FULL_CLANG_TIDY}" == 1 ]]; then
  echo "Running a full clang-tidy"
  run_clang_tidy
else
  if [[ -z "${DIFF_REF}" ]]; then
    if [[ "${BUILD_REASON}" == "PullRequest" ]]; then
      DIFF_REF="remotes/origin/${SYSTEM_PULLREQUEST_TARGETBRANCH}"
    elif [[ "${BUILD_REASON}" == *CI ]]; then
      DIFF_REF="HEAD^"
    else
      DIFF_REF=$("${ENVOY_SRCDIR}"/tools/git/last_github_commit.sh)
    fi
  fi
  echo "Running clang-tidy-diff against ${DIFF_REF} ($(git rev-parse
"${DIFF_REF}")), current HEAD ($(git rev-parse HEAD))" run_clang_tidy_diff "${DIFF_REF}" fi if [[ -s "${FIX_YAML}" ]]; then echo "clang-tidy check failed, potentially fixed by clang-apply-replacements:" cat "${FIX_YAML}" exit 1 fi ================================================ FILE: ci/run_envoy_docker.sh ================================================ #!/bin/bash set -e # shellcheck source=ci/envoy_build_sha.sh . "$(dirname "$0")"/envoy_build_sha.sh function is_windows() { [[ "$(uname -s)" == *NT* ]] } read -ra ENVOY_DOCKER_OPTIONS <<< "${ENVOY_DOCKER_OPTIONS:-}" # TODO(phlax): uppercase these env vars export HTTP_PROXY="${http_proxy:-}" export HTTPS_PROXY="${https_proxy:-}" export NO_PROXY="${no_proxy:-}" if is_windows; then [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-windows2019" # TODO(sunjayBhatia): Currently ENVOY_DOCKER_OPTIONS is ignored on Windows because # CI sets it to a Linux-specific value. Undo this once https://github.com/envoyproxy/envoy/issues/13272 # is resolved. ENVOY_DOCKER_OPTIONS=() DEFAULT_ENVOY_DOCKER_BUILD_DIR=C:/Windows/Temp/envoy-docker-build BUILD_DIR_MOUNT_DEST=C:/build # Replace MSYS style drive letter (/c/) with driver letter designation (C:/) SOURCE_DIR=$(echo "${PWD}" | sed -E "s#/([a-zA-Z])/#\1:/#") SOURCE_DIR_MOUNT_DEST=C:/source START_COMMAND=("bash" "-c" "cd source && $*") else [[ -z "${IMAGE_NAME}" ]] && IMAGE_NAME="envoyproxy/envoy-build-ubuntu" # We run as root and later drop permissions. This is required to setup the USER # in useradd below, which is need for correct Python execution in the Docker # environment. 
ENVOY_DOCKER_OPTIONS+=(-u root:root) ENVOY_DOCKER_OPTIONS+=(-v /var/run/docker.sock:/var/run/docker.sock) ENVOY_DOCKER_OPTIONS+=(--cap-add SYS_PTRACE --cap-add NET_RAW --cap-add NET_ADMIN) DEFAULT_ENVOY_DOCKER_BUILD_DIR=/tmp/envoy-docker-build BUILD_DIR_MOUNT_DEST=/build SOURCE_DIR="${PWD}" SOURCE_DIR_MOUNT_DEST=/source START_COMMAND=("/bin/bash" "-lc" "groupadd --gid $(id -g) -f envoygroup \ && useradd -o --uid $(id -u) --gid $(id -g) --no-create-home --home-dir /build envoybuild \ && usermod -a -G pcap envoybuild \ && sudo -EHs -u envoybuild bash -c 'cd /source && $*'") fi # The IMAGE_ID defaults to the CI hash but can be set to an arbitrary image ID (found with 'docker # images'). [[ -z "${IMAGE_ID}" ]] && IMAGE_ID="${ENVOY_BUILD_SHA}" [[ -z "${ENVOY_DOCKER_BUILD_DIR}" ]] && ENVOY_DOCKER_BUILD_DIR="${DEFAULT_ENVOY_DOCKER_BUILD_DIR}" # Replace backslash with forward slash for Windows style paths ENVOY_DOCKER_BUILD_DIR="${ENVOY_DOCKER_BUILD_DIR//\\//}" mkdir -p "${ENVOY_DOCKER_BUILD_DIR}" [[ -t 1 ]] && ENVOY_DOCKER_OPTIONS+=("-it") [[ -f .git ]] && [[ ! -d .git ]] && ENVOY_DOCKER_OPTIONS+=(-v "$(git rev-parse --git-common-dir):$(git rev-parse --git-common-dir)") export ENVOY_BUILD_IMAGE="${IMAGE_NAME}:${IMAGE_ID}" # Since we specify an explicit hash, docker-run will pull from the remote repo if missing. 
docker run --rm \ "${ENVOY_DOCKER_OPTIONS[@]}" \ -v "${ENVOY_DOCKER_BUILD_DIR}":"${BUILD_DIR_MOUNT_DEST}" \ -v "${SOURCE_DIR}":"${SOURCE_DIR_MOUNT_DEST}" \ -e HTTP_PROXY \ -e HTTPS_PROXY \ -e NO_PROXY \ -e BAZEL_STARTUP_OPTIONS \ -e BAZEL_BUILD_EXTRA_OPTIONS \ -e BAZEL_EXTRA_TEST_OPTIONS \ -e BAZEL_REMOTE_CACHE \ -e ENVOY_STDLIB \ -e BUILD_REASON \ -e BAZEL_REMOTE_INSTANCE \ -e GCP_SERVICE_ACCOUNT_KEY \ -e NUM_CPUS \ -e ENVOY_RBE \ -e FUZZIT_API_KEY \ -e ENVOY_BUILD_IMAGE \ -e ENVOY_SRCDIR \ -e ENVOY_BUILD_TARGET \ -e SYSTEM_PULLREQUEST_TARGETBRANCH \ -e SYSTEM_PULLREQUEST_PULLREQUESTNUMBER \ -e GCS_ARTIFACT_BUCKET \ -e BUILD_SOURCEBRANCHNAME \ -e BAZELISK_BASE_URL \ -e ENVOY_BUILD_ARCH \ -e SLACK_TOKEN \ -e BUILD_URI\ -e REPO_URI \ "${ENVOY_BUILD_IMAGE}" \ "${START_COMMAND[@]}" ================================================ FILE: ci/setup_cache.sh ================================================ #!/bin/bash set -e if [[ -n "${GCP_SERVICE_ACCOUNT_KEY:0:1}" ]]; then # mktemp will create a tempfile with u+rw permission minus umask, it will not be readable by all # users by default. GCP_SERVICE_ACCOUNT_KEY_FILE=$(mktemp -t gcp_service_account.XXXXXX.json) gcp_service_account_cleanup() { echo "Deleting service account key file..." rm -rf "${GCP_SERVICE_ACCOUNT_KEY_FILE}" } trap gcp_service_account_cleanup EXIT bash -c 'echo "${GCP_SERVICE_ACCOUNT_KEY}"' | base64 --decode > "${GCP_SERVICE_ACCOUNT_KEY_FILE}" export BAZEL_BUILD_EXTRA_OPTIONS+=" --google_credentials=${GCP_SERVICE_ACCOUNT_KEY_FILE}" fi if [[ -n "${BAZEL_REMOTE_CACHE}" ]]; then export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_cache=${BAZEL_REMOTE_CACHE}" echo "Set up bazel remote read/write cache at ${BAZEL_REMOTE_CACHE}." if [[ -n "${BAZEL_REMOTE_INSTANCE}" ]]; then export BAZEL_BUILD_EXTRA_OPTIONS+=" --remote_instance_name=${BAZEL_REMOTE_INSTANCE}" echo "instance_name: ${BAZEL_REMOTE_INSTANCE}." 
  elif [[ -z "${ENVOY_RBE}" ]]; then
    # Remote cache without RBE and without an explicit instance name: use it
    # as a plain read/write build cache and bound per-action upload time.
    export BAZEL_BUILD_EXTRA_OPTIONS+=" --jobs=HOST_CPUS*.9 --remote_timeout=600"
    echo "using local build cache."
  fi
else
  echo "No remote cache is set, skipping setup remote cache."
fi



================================================
FILE: ci/upload_gcs_artifact.sh
================================================
#!/bin/bash

# Uploads the contents of a local directory to a GCS location derived from the
# CI branch / PR number. Usage: upload_gcs_artifact.sh <source dir> <suffix>.
set -e -o pipefail

if [[ -z "${GCS_ARTIFACT_BUCKET}" ]]; then
  echo "Artifact bucket is not set, not uploading artifacts."
  exit 0
fi

# Fail when service account key is not specified
bash -c 'echo ${GCP_SERVICE_ACCOUNT_KEY}' | base64 --decode | gcloud auth activate-service-account --key-file=-

SOURCE_DIRECTORY="$1"
TARGET_SUFFIX="$2"

if [ ! -d "${SOURCE_DIRECTORY}" ]; then
  echo "ERROR: ${SOURCE_DIRECTORY} is not found."
  exit 1
fi

# Prefer the PR number (pull-request builds); fall back to the branch name.
BRANCH=${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER:-${BUILD_SOURCEBRANCHNAME}}
GCS_LOCATION="${GCS_ARTIFACT_BUCKET}/${BRANCH}/${TARGET_SUFFIX}"

echo "Uploading to gs://${GCS_LOCATION} ..."
# -m parallel, -q quiet; rsync -d deletes remote files absent locally, -r recurses.
gsutil -mq rsync -dr "${SOURCE_DIRECTORY}" "gs://${GCS_LOCATION}"
echo "Artifacts uploaded to: https://storage.googleapis.com/${GCS_LOCATION}/index.html"


================================================
FILE: ci/verify_examples.sh
================================================
#!/bin/bash -E

TESTFILTER="${1:-*}"
FAILED=()
SRCDIR="${SRCDIR:-$(pwd)}"
# NOTE(review): the override env var is spelled EXCLUDED_EXAMPLES while the
# local variable is EXCLUDE_EXAMPLES -- the mismatch may be intentional; confirm.
EXCLUDE_EXAMPLES=${EXCLUDED_EXAMPLES:-"wasm"}

# ERR trap handler: records a caller() stack trace (annotated with the current
# example name, if set) into the FAILED array so that all failures can be
# reported together at the end of the run.
trap_errors () {
    local frame=0 command line sub file
    if [[ -n "$example" ]]; then
        command=" (${example})"
    fi
    set +v
    while read -r line sub file < <(caller "$frame"); do
        if [[ "$frame" -ne "0" ]]; then
            FAILED+=(" > ${sub}@ ${file} :${line}")
        else
            FAILED+=("${sub}@ ${file} :${line}${command}")
        fi
        ((frame++))
    done
    set -v
}

trap trap_errors ERR
# NOTE(review): this registers the bare command "exit" for signals "1" (HUP)
# and INT; if exiting with status 1 on INT was intended it would need quoting
# as: trap 'exit 1' INT -- confirm against upstream.
trap exit 1 INT

# Run ./verify.sh in each examples/* directory matching TESTFILTER, skipping
# directories matching EXCLUDE_EXAMPLES.
run_examples () {
    local examples example
    cd "${SRCDIR}/examples" || exit 1
    examples=$(find .
        -mindepth 1 -maxdepth 1 -type d -name "$TESTFILTER" | grep -vE "${EXCLUDE_EXAMPLES}" | sort)
    for example in $examples; do
        pushd "$example" > /dev/null || return 1
        # Each example directory ships its own verify.sh; a non-zero exit
        # triggers the ERR trap which records the failure in FAILED.
        ./verify.sh
        popd > /dev/null || return 1
    done
}

run_examples

# Report everything the ERR trap collected and fail the run if non-empty.
if [[ "${#FAILED[@]}" -ne "0" ]]; then
    echo "TESTS FAILED:"
    for failed in "${FAILED[@]}"; do
        echo "$failed" >&2
    done
    exit 1
fi


================================================
FILE: ci/windows_ci_steps.sh
================================================
#!/usr/bin/bash.exe

set -e

# Always report remaining disk space on exit (success or failure).
function finish {
  echo "disk space at end of build:"
  df -h
}
trap finish EXIT

echo "disk space at beginning of build:"
df -h

# shellcheck source=ci/setup_cache.sh
. "$(dirname "$0")"/setup_cache.sh

# Split the (possibly multi-word) option env vars into arrays.
read -ra BAZEL_STARTUP_OPTIONS <<< "${BAZEL_STARTUP_OPTIONS:-}"
# Default to msvc-cl if not overridden
read -ra BAZEL_BUILD_EXTRA_OPTIONS <<< "${BAZEL_BUILD_EXTRA_OPTIONS:---config=msvc-cl}"
read -ra BAZEL_EXTRA_TEST_OPTIONS <<< "${BAZEL_EXTRA_TEST_OPTIONS:-}"

# Set up TMPDIR so bash and non-bash can access
# e.g. TMPDIR=/d/tmp, make a link from /d/d to /d so both bash and Windows programs resolve the
# same path
# This is due to this issue: https://github.com/bazelbuild/rules_foreign_cc/issues/334
# rules_foreign_cc does not currently use bazel output/temp directories by default, it uses mktemp
# which respects the value of the TMPDIR environment variable
drive="$(readlink -f "$TMPDIR" | cut -d '/' -f2)"
if [ ! -e "/$drive/$drive" ]; then
  /c/windows/system32/cmd.exe /c "mklink /d $drive:\\$drive $drive:\\"
fi

BUILD_DIR=${BUILD_DIR:-/c/build}
if [[ ! -d "${BUILD_DIR}" ]]
then
  echo "${BUILD_DIR} mount missing - did you forget -v :${BUILD_DIR}? Creating."
  mkdir -p "${BUILD_DIR}"
fi

# Environment setup.
export TEST_TMPDIR=${BUILD_DIR}/tmp [[ "${BUILD_REASON}" != "PullRequest" ]] && BAZEL_EXTRA_TEST_OPTIONS+=(--nocache_test_results) BAZEL_STARTUP_OPTIONS+=("--output_base=c:/_eb") BAZEL_BUILD_OPTIONS=( -c opt --show_task_finish --verbose_failures --define "wasm=disabled" "--test_output=errors" "${BAZEL_BUILD_EXTRA_OPTIONS[@]}" "${BAZEL_EXTRA_TEST_OPTIONS[@]}") # Also setup some space for building Envoy standalone. ENVOY_BUILD_DIR="${BUILD_DIR}"/envoy mkdir -p "${ENVOY_BUILD_DIR}" # This is where we copy build deliverables to. ENVOY_DELIVERY_DIR="${ENVOY_BUILD_DIR}"/source/exe mkdir -p "${ENVOY_DELIVERY_DIR}" # Test to validate updates of all dependency libraries in bazel/external and bazel/foreign_cc # bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //bazel/... --build_tag_filters=-skip_on_windows # Complete envoy-static build (nothing needs to be skipped, build failure indicates broken dependencies) bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //source/exe:envoy-static # Copy binary to delivery directory cp -f bazel-bin/source/exe/envoy-static.exe "${ENVOY_DELIVERY_DIR}/envoy.exe" # Copy for azp, creating a tar archive tar czf "${ENVOY_BUILD_DIR}"/envoy_binary.tar.gz -C "${ENVOY_DELIVERY_DIR}" envoy.exe # Test invocations of known-working tests on Windows bazel "${BAZEL_STARTUP_OPTIONS[@]}" test "${BAZEL_BUILD_OPTIONS[@]}" //test/... --test_tag_filters=-skip_on_windows,-fails_on_windows,-flaky_on_windows --build_tests_only # Build tests that are known-flaky or known-failing to ensure no compilation regressions bazel "${BAZEL_STARTUP_OPTIONS[@]}" build "${BAZEL_BUILD_OPTIONS[@]}" //test/... 
--test_tag_filters=-skip_on_windows,fails_on_windows,flaky_on_windows --build_tests_only # Summarize tests bypasssed to monitor the progress of porting to Windows echo "Tests bypassed as skip_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "skip_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known unbuildable or inapplicable tests" echo "Tests bypassed as fails_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "fails_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known incompatible tests" echo "Tests bypassed as flaky_on_windows: $(bazel query 'kind(".*test rule", attr("tags", "flaky_on_windows", //test/...))' 2>/dev/null | sort | wc -l) known unstable tests" ================================================ FILE: configs/BUILD ================================================ load( "//bazel:envoy_build_system.bzl", "envoy_package", ) load("@rules_python//python:defs.bzl", "py_binary") load("@configs_pip3//:requirements.bzl", "requirement") licenses(["notice"]) # Apache 2 envoy_package() py_binary( name = "configgen", srcs = ["configgen.py"], data = glob([ "*.yaml", ]), deps = [ requirement("Jinja2"), requirement("MarkupSafe"), ], ) filegroup( name = "configs", srcs = [ "google-vrp/envoy-edge.yaml", "google-vrp/envoy-origin.yaml", "original-dst-cluster/proxy_config.yaml", ] + select({ "//bazel:apple": [], "//bazel:windows_x86_64": [], "//conditions:default": ["freebind/freebind.yaml"], }), ) genrule( name = "example_configs", srcs = [ ":configs", "//examples:configs", "//docs:configs", "//test/config/integration/certs", ], outs = ["example_configs.tar"], cmd = ( "$(location configgen.sh) $(location configgen) $(@D) " + "$(locations :configs) " + "$(locations //examples:configs) " + "$(locations //docs:configs) " + "$(locations //test/config/integration/certs)" ), tools = [ "configgen.sh", ":configgen", ], ) ================================================ FILE: configs/Dockerfile ================================================ # 
This configuration will build a Docker container containing # an Envoy proxy that routes to Google. FROM envoyproxy/envoy-dev:latest RUN apt-get update COPY google_com_proxy.v2.yaml /etc/envoy.yaml CMD /usr/local/bin/envoy -c /etc/envoy.yaml ================================================ FILE: configs/access_log_format_helper_v2.template.yaml ================================================ {% macro ingress_sampled_log() -%} format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" {% endmacro %} {% macro ingress_full() -%} format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\"\n" {% endmacro %} {% macro egress_error_log() -%} format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\"\n" {% endmacro %} {% macro egress_error_amazon_service() -%} format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH):256% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" \"%RESP(X-AMZN-RequestId)%\"\n" {% endmacro %} ================================================ FILE: configs/configgen.py ================================================ 
import jinja2 import json from collections import OrderedDict import os import shutil import sys SCRIPT_DIR = os.path.dirname(__file__) OUT_DIR = sys.argv[1] # # About this script: Envoy configurations needed for a complete infrastructure are complicated. # This script demonstrates how to programatically build Envoy configurations using jinja templates. # This is roughly how we build our configurations at Lyft. The three configurations demonstrated # here (front proxy, double proxy, and service to service) are also very close approximations to # what we use at Lyft in production. They give a demonstration of how to configure most Envoy # features. Along with the configuration guide it should be possible to modify them for different # use cases. # # This is the set of internal services that front Envoy will route to. Each cluster referenced # in envoy_router.template.json must be specified here. It is a dictionary of dictionaries. # Options can be specified for each cluster if needed. See make_route_internal() in # routing_helper.template.json for the types of options supported. front_envoy_clusters = {'service1': {}, 'service2': {}, 'service3': {}, 'ratelimit': {}} # This is the set of internal services that local Envoys will route to. All services that will be # accessed via the 9001 egress port need to be listed here. It is a dictionary of dictionaries. # Options can be specified for each cluster if needed. See make_route_internal() in # routing_helper.template.json for the types of options supported. service_to_service_envoy_clusters = { 'ratelimit': {}, 'service1': { 'service_to_service_rate_limit': True }, 'service3': {} } # This is a list of external hosts that can be accessed from local Envoys. Each external service has # its own port. This is because some SDKs don't make it easy to use host based routing. Below # we demonstrate setting up proxying for DynamoDB. 
In the config, this ends up using the HTTP # DynamoDB statistics filter, as well as generating a special access log which includes the # X-AMZN-RequestId response header. external_virtual_hosts = [{ 'name': 'dynamodb_iad', 'address': "127.0.0.1", 'protocol': "TCP", 'port_value': "9204", 'hosts': [{ 'name': 'dynamodb_iad', 'domain': '*', 'remote_address': 'dynamodb.us-east-1.amazonaws.com', 'protocol': 'TCP', 'port_value': '443', 'verify_subject_alt_name': ['dynamodb.us-east-1.amazonaws.com'], 'ssl': True }], 'is_amzn_service': True, 'cluster_type': 'logical_dns' }] # This is the set of mongo clusters that local Envoys can talk to. Each database defines a set of # mongos routers to talk to, and whether the global rate limit service should be called for new # connections. Many organizations will not be interested in the mongo feature. Setting this to # an empty dictionary will remove all mongo configuration. The configuration is a useful example # as it demonstrates how to setup TCP proxy and the network rate limit filter. mongos_servers = { 'somedb': { 'address': "127.0.0.1", 'protocol': "TCP", 'port_value': 27019, 'hosts': [ { 'port_value': 27817, 'address': 'router1.yourcompany.net', 'protocol': 'TCP' }, { 'port_value': 27817, 'address': 'router2.yourcompany.net', 'protocol': 'TCP' }, { 'port_value': 27817, 'address': 'router3.yourcompany.net', 'protocol': 'TCP' }, { 'port_value': 27817, 'address': 'router4.yourcompany.net', 'protocol': 'TCP' }, ], 'ratelimit': True } } def generate_config(template_path, template, output_file, **context): """ Generate a final config file based on a template and some context. 
""" env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_path, followlinks=True), undefined=jinja2.StrictUndefined) raw_output = env.get_template(template).render(**context) with open(output_file, 'w') as fh: fh.write(raw_output) # TODO(sunjayBhatia, wrowe): Avoiding tracing extensions until they build on Windows tracing_enabled = os.name != 'nt' # Generate a demo config for the main front proxy. This sets up both HTTP and HTTPS listeners, # as well as a listener for the double proxy to connect to via SSL client authentication. generate_config(SCRIPT_DIR, 'envoy_front_proxy_v2.template.yaml', '{}/envoy_front_proxy.v2.yaml'.format(OUT_DIR), clusters=front_envoy_clusters, tracing=tracing_enabled) # Generate a demo config for the double proxy. This sets up both an HTTP and HTTPS listeners, # and backhauls the traffic to the main front proxy. generate_config(SCRIPT_DIR, 'envoy_double_proxy_v2.template.yaml', '{}/envoy_double_proxy.v2.yaml'.format(OUT_DIR), tracing=tracing_enabled) # Generate a demo config for the service to service (local) proxy. This sets up several different # listeners: # 9211: Main ingress listener for service to service traffic. # 9001: Main egress listener for service to service traffic. Applications use this port to send # requests to other services. # optional external service ports: built from external_virtual_hosts above. Each external host # that Envoy proxies to listens on its own port. # optional mongo ports: built from mongos_servers above. 
# Generate the service-to-service (local) proxy demo config using the cluster,
# external host and mongo definitions assembled above.
generate_config(SCRIPT_DIR, 'envoy_service_to_service_v2.template.yaml',
                '{}/envoy_service_to_service.yaml'.format(OUT_DIR),
                internal_virtual_hosts=service_to_service_envoy_clusters,
                external_virtual_hosts=external_virtual_hosts,
                mongos_servers=mongos_servers)

# Copy the static (non-templated) demo configs straight into the output directory.
for google_ext in ['v2.yaml']:
    shutil.copy(os.path.join(SCRIPT_DIR, 'google_com_proxy.%s' % google_ext), OUT_DIR)

shutil.copy(os.path.join(SCRIPT_DIR, 'encapsulate_in_connect.v3.yaml'), OUT_DIR)
shutil.copy(os.path.join(SCRIPT_DIR, 'terminate_connect.v3.yaml'), OUT_DIR)


================================================
FILE: configs/configgen.sh
================================================
#!/bin/bash

# Packages generated and example configs for distribution.
# Usage: configgen.sh <configgen binary> <output dir> <config/cert/lib files...>
# *.pem files are copied to certs/, *.lua files to lib/; everything else is
# flattened into the output directory with '/' replaced by '_'.
set -e

CONFIGGEN="$1"
shift
OUT_DIR="$1"
shift

mkdir -p "$OUT_DIR/certs"
mkdir -p "$OUT_DIR/lib"

# Run the configgen binary to render the templated configs into OUT_DIR.
"$CONFIGGEN" "$OUT_DIR"

for FILE in "$@"; do
  case "$FILE" in
  *.pem)
    cp "$FILE" "$OUT_DIR/certs"
    ;;
  *.lua)
    cp "$FILE" "$OUT_DIR/lib"
    ;;
  *)
    # Strip everything up to and including "examples/" from the path.
    FILENAME="$(echo "$FILE" | sed -e 's/.*examples\///g')"
    # Configuration filenames may conflict. To avoid this we use the full path.
    cp -v "$FILE" "$OUT_DIR/${FILENAME//\//_}"
    ;;
  esac
done

# tar is having issues with -C for some reason so just cd into OUT_DIR.
(cd "$OUT_DIR"; tar -hcvf example_configs.tar -- *.yaml certs/*.pem lib/*.lua) ================================================ FILE: configs/encapsulate_in_connect.v3.yaml ================================================ admin: access_log_path: /tmp/admin_access.log address: socket_address: protocol: TCP address: 127.0.0.1 port_value: 9903 static_resources: listeners: - name: listener_0 address: socket_address: protocol: TCP address: 127.0.0.1 port_value: 10000 filter_chains: - filters: - name: tcp typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.tcp_proxy.v3.TcpProxy stat_prefix: tcp_stats cluster: "cluster_0" tunneling_config: hostname: host.com clusters: - name: cluster_0 connect_timeout: 5s http2_protocol_options: {} load_assignment: cluster_name: cluster_0 endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 10001 ================================================ FILE: configs/envoy_double_proxy_v2.template.yaml ================================================ {%- macro listener(protocol, address, port_value, tls, proxy_proto, tracing) -%} - name: listener_created_from_configgen address: socket_address: protocol: {{protocol}} address: {{address}} port_value: {{port_value}} filter_chains: - filter_chain_match: {} {% if tls %} transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: filename: certs/servercert.pem private_key: filename: certs/serverkey.pem validation_context: {} alpn_protocols: - h2 - http/1.1 {% endif %} {% if proxy_proto %} use_proxy_proto: true {%endif -%} filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: router route_config: name: local_route virtual_hosts: - name: local_service 
domains: ["*"] routes: - match: prefix: "/" route: cluster: backhaul #Generally allow front proxy to control timeout and use this as a backstop timeout: 20s http_filters: - name: envoy.filters.http.health_check typed_config: "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck pass_through_mode: false headers: - exact_match: /healthcheck name: :path - name: envoy.filters.http.buffer typed_config: "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.router typed_config: {} {% if tracing %} tracing: operation_name: INGRESS provider: name: envoy.tracers.lightstep typed_config: "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig access_token_file: "/etc/envoy/lightstep_access_token" collector_cluster: lightstep_saas {% endif %} common_http_protocol_options: idle_timeout: 840s access_log: - name: envoy.access_loggers.file filter: or_filter: filters: - status_code_filter: comparison: op: GE value: default_value: 500 runtime_key: access_log.access_error.status - duration_filter: comparison: op: GE value: default_value: 1000 runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: /var/log/envoy/access_error.log format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% if proxy_proto %} use_remote_address: true {%endif -%} {% endmacro -%} static_resources: listeners: # TCP listener for external port 443 (TLS). 
Assumes a TCP LB in front such as ELB which # supports proxy proto {{ listener("TCP", "0.0.0.0",9300,True, True, tracing)|indent(2) }} # TCP listener for external port 80 (non-TLS). Assumes a TCP LB in front such as ELB which # supports proxy proto. {{ listener("TCP", "0.0.0.0",9301,False, True, tracing)|indent(2) }} clusters: - name: statsd type: STATIC connect_timeout: 0.25s lb_policy: ROUND_ROBIN load_assignment: cluster_name: statsd endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 8125 protocol: TCP - name: backhaul type: STRICT_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN load_assignment: cluster_name: backhaul endpoints: - lb_endpoints: - endpoint: address: socket_address: address: front-proxy.yourcompany.net port_value: 9400 protocol: TCP # There are so few connections going back # that we can get some imbalance. Until we come up # with a better solution just limit the requests # so we can cycle and get better spread. max_requests_per_connection: 25000 transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: filename: certs/clientcert.pem private_key: filename: certs/clientkey.pem validation_context: trusted_ca: filename: certs/cacert.pem match_subject_alt_names: exact: "front-proxy.yourcompany.net" http2_protocol_options: {} - name: lightstep_saas type: LOGICAL_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN load_assignment: cluster_name: lightstep_saas endpoints: - lb_endpoints: - endpoint: address: socket_address: address: collector-grpc.lightstep.com port_value: 443 protocol: TCP http2_protocol_options: {} transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: filename: certs/cacert.pem match_subject_alt_names: exact: "collector-grpc.lightstep.com" 
flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.stat_sinks.statsd typed_config: "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink tcp_cluster_name: statsd layered_runtime: layers: - name: root disk_layer: symlink_root: /srv/configset/envoydata/current subdirectory: envoy - name: override disk_layer: symlink_root: /srv/configset/envoydata/current subdirectory: envoy_override append_service_cluster: true - name: admin admin_layer: {} admin: access_log_path: "/var/log/envoy/admin_access.log" address: socket_address: protocol: TCP address: 127.0.0.1 port_value: 9901 ================================================ FILE: configs/envoy_front_proxy_v2.template.yaml ================================================ {% import 'routing_helper_v2.template.yaml' as helper -%} {% macro router_file_content() -%}{% include kwargs['router_file'] -%}{% endmacro -%} {% macro listener(protocol, address, port_value, proxy_proto, tls, tracing) -%} name: not_required_for_static_listeners address: socket_address: protocol: {{protocol}} address: {{address}} port_value: {{port_value}} filter_chains: {% if tls %} - transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.DownstreamTlsContext common_tls_context: alpn_protocols: h2,http/1.1 tls_certificates: - certificate_chain: filename: certs/servercert.pem private_key: filename: certs/serverkey.pem {% if kwargs.get('pin_double_proxy_client', False) %} validation_context: trusted_ca: filename: certs/cacert.pm #This should be the hash of the /etc/envoy/envoy-double-proxy.pem cert used in the #double proxy configuration. 
verify_certificate_hash: "0000000000000000000000000000000000000000000000000000000000000000" {% endif %} {%if proxy_proto%} use_proxy_proto: true {%endif%} {%endif %} filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO {% if proxy_proto -%} use_remote_address: true {%endif-%} stat_prefix: ingress_http route_config: {{ router_file_content(router_file='envoy_router_v2.template.yaml')|indent(10) }} http_filters: - name: envoy.filters.http.health_check typed_config: "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck pass_through_mode: false headers: - name: ":path" exact_match: "/healthcheck" - name: envoy.filters.http.buffer typed_config: "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.ratelimit typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit domain: envoy_front request_type: external rate_limit_service: grpc_service: envoy_grpc: cluster_name: ratelimit - name: envoy.filters.http.router typed_config: {} add_user_agent: true {% if tracing %} tracing: operation_name: INGRESS provider: name: envoy.tracers.lightstep typed_config: "@type": type.googleapis.com/envoy.config.trace.v2.LightstepConfig collector_cluster: lightstep_saas access_token_file: "/etc/envoy/lightstep_access_token" {% endif %} common_http_protocol_options: idle_timeout: 840s access_log: - name: envoy.access_loggers.file filter: or_filter: filters: - status_code_filter: comparison: op: GE value: default_value: 500 runtime_key: access_log.access_error.status - duration_filter: comparison: op: GE value: default_value: 1000 runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: 
"/var/log/envoy/access_error.log" format: "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" \"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%REQ(X-LYFT-USER-ID)%\" \"%RESP(GRPC-STATUS)%\"\n" {% endmacro -%} static_resources: listeners: # TCP listeners for public HTTP/HTTPS endpoints. Assumes a TCP LB in front such as ELB which # supports proxy proto. - {{ listener("TCP", "0.0.0.0", "9300", True, True, tracing)|indent(2) }} - {{ listener("TCP", "0.0.0.0", "9301", True, True, tracing)|indent(2) }} # TCP listener for backhaul traffic from the double proxy. # See envoy_double_proxy.template.json - {{ listener("TCP", "0.0.0.0", "9400", True, True, tracing, pin_double_proxy_client=True)|indent(2) }} clusters: - name: sds type: STRICT_DNS connect_timeout: 0.25s lb_policy: ROUND_ROBIN load_assignment: cluster_name: sds endpoints: - lb_endpoints: - endpoint: address: socket_address: address: discovery.yourcompany.net port_value: 80 protocol: TCP - name: statsd type: STATIC connect_timeout: 0.25s lb_policy: ROUND_ROBIN load_assignment: cluster_name: statsd endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 8125 protocol: TCP - name: lightstep_saas type: LOGICAL_DNS connect_timeout: 1s lb_policy: ROUND_ROBIN load_assignment: cluster_name: lightstep_saas endpoints: - lb_endpoints: - endpoint: address: socket_address: address: collector-grpc.lightstep.com port_value: 443 protocol: TCP http2_protocol_options: {} {% for service, options in clusters.items() -%} - {{ helper.internal_cluster_definition(service, options)|indent(2) }} {% endfor %} cluster_manager: outlier_detection: event_log_path: /var/log/envoy/outlier_events.log flags_path: /etc/envoy/flags layered_runtime: layers: - name: root disk_layer: symlink_root: 
/srv/configset/envoydata/current subdirectory: envoy - name: override disk_layer: symlink_root: /srv/configset/envoydata/current subdirectory: envoy_override append_service_cluster: true - name: admin admin_layer: {} admin: access_log_path: /var/log/envoy/admin_access.log address: socket_address: protocol: TCP address: 0.0.0.0 port_value: 9901 ================================================ FILE: configs/envoy_router_v2.template.yaml ================================================ {% import 'routing_helper_v2.template.yaml' as helper with context -%} name: local_route virtual_hosts: - name: www domains: - www.yourcompany.com routes: - match: prefix: "/foo/bar" runtime_fraction: default_value: numerator: 0 denominator: HUNDRED runtime_key: routing.www.use_service_2 route: {{ helper.make_route('service2')|indent(4) }} - match: prefix: "/" route: {{ helper.make_route('service1')|indent(4) }} require_tls: ALL rate_limits: - actions: remote_address: {} - name: www_redirect domains: - wwww.yourcompany.net routes: - match: prefix: "/" redirect: host_redirect: www.yourcompany.net require_tls: ALL rate_limits: - actions: remote_address: {} - name: api domains: - api.yourcompany.net routes: - match: path: "/foo/bar" route: {{ helper.make_route('service3')|indent(4) }} - match: prefix: "/" route: {{ helper.make_route('service1')|indent(4) }} require_tls: EXTERNAL_ONLY rate_limits: - actions: remote_address: {} ================================================ FILE: configs/envoy_service_to_service_v2.template.yaml ================================================ {% import 'routing_helper_v2.template.yaml' as helper -%} {% import 'access_log_format_helper_v2.template.yaml' as access_log_helper -%} {% macro ingress_listener(protocol, address, port_value) -%} - address: socket_address: protocol: {{protocol}} address: {{address}} port_value: {{port_value}} traffic_direction: INBOUND filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: 
"@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: ingress_http route_config: name: local_route virtual_hosts: - name: local_service domains: - "*" routes: - match: prefix: "/" headers: - name: content-type exact_match: application/grpc route: cluster: local_service_grpc - match: prefix: "/" route: cluster: local_service http_filters: - name: envoy.filters.http.health_check typed_config: "@type": type.googleapis.com/envoy.config.filter.http.health_check.v2.HealthCheck pass_through_mode: true headers: - name: ":path" exact_match: "/healthcheck" cache_time: 2.5s - name: envoy.filters.http.buffer typed_config: "@type": type.googleapis.com/envoy.config.filter.http.buffer.v2.Buffer max_request_bytes: 5242880 - name: envoy.filters.http.router typed_config: {} access_log: - name: envoy.access_loggers.file filter: not_health_check_filter: {} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/ingress_http.log" {{ access_log_helper.ingress_full()|indent(10)}} - name: envoy.access_loggers.file filter: and_filter: filters: - or_filter: filters: - status_code_filter: comparison: op: GE value: default_value: 400 runtime_key: access_log.access_error.status - status_code_filter: comparison: op: EQ value: default_value: 0 runtime_key: access_log.access_error.status - duration_filter: comparison: op: GE value: default_value: 2000 runtime_key: access_log.access_error.duration - not_health_check_filter: {} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/ingress_http_error.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} - name: envoy.access_loggers.file filter: and_filter: filters: - not_health_check_filter: {} - runtime_filter: runtime_key: access_log.ingress_http typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: 
"/var/log/envoy/ingress_http_sampled.log" {{ access_log_helper.ingress_sampled_log()|indent(10)}} common_http_protocol_options: idle_timeout: 840s {% endmacro -%} static_resources: listeners: {{ ingress_listener("tcp", "0.0.0.0", 9211) | indent(2)}} - address: socket_address: protocol: TCP port_value: 9001 address: 127.0.0.1 traffic_direction: OUTBOUND filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http route_config: name: local_route virtual_hosts: {% for service, options in internal_virtual_hosts.items() %} - name: {{ service }} domains: - {{ service }} routes: - match: prefix: "/" route: {{ helper.make_route_internal(service, options)|indent(16) }} {% endfor %} add_user_agent: true common_http_protocol_options: idle_timeout: 840s access_log: - name: envoy.access_loggers.file filter: or_filter: filters: - status_code_filter: comparison: op: GE value: default_value: 400 runtime_key: access_log.access_error.status - duration_filter: comparison: op: GE value: default_value: 2000 runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_http_error.log" {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true http_filters: - name: envoy.filters.http.ratelimit typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit domain: envoy_service_to_service rate_limit_service: grpc_service: envoy_grpc: cluster_name: ratelimit - name: envoy.filters.http.grpc_http1_bridge typed_config: {} - name: envoy.filters.http.router typed_config: {} - address: socket_address: protocol: TCP port_value: 9002 address: 127.0.0.1 traffic_direction: OUTBOUND filter_chains: - filters: - name: 
envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO stat_prefix: egress_http rds: config_source: api_config_source: api_type: GRPC grpc_services: envoy_grpc: cluster_name: "rds" route_config_name: rds_config_for_listener_1 add_user_agent: true common_http_protocol_options: idle_timeout: 840s access_log: - name: envoy.access_loggers.file filter: or_filter: filters: - status_code_filter: comparison: op: GE value: default_value: 400 runtime_key: access_log.access_error.status - duration_filter: comparison: op: GE value: default_value: 2000 runtime_key: access_log.access_error.duration - traceable_filter: {} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_http_error.log" {{ access_log_helper.egress_error_log()|indent(10) }} use_remote_address: true http_filters: - name: envoy.filters.http.ratelimit typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit domain: envoy_service_to_service rate_limit_service: grpc_service: envoy_grpc: cluster_name: ratelimit - name: envoy.filters.http.grpc_http1_bridge typed_config: {} - name: envoy.filters.http.router typed_config: {} {% if external_virtual_hosts|length > 0 or mongos_servers|length > 0 %}{% endif -%} {% for mapping in external_virtual_hosts -%} - name: "{{ mapping['address']}}" address: socket_address: address: "{{ mapping['address'] }}" protocol: TCP port_value: 9901 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager codec_type: AUTO common_http_protocol_options: idle_timeout: 840s stat_prefix: egress_{{ mapping['name'] }} #update access_logs here route_config: virtual_hosts: {% for host in mapping['hosts'] %} - name: egress_{{ host['name'] }} 
domains: - "{{ host['domain'] }}" routes: - match: prefix: "/" route: cluster: egress_{{ host['name'] }} retry_policy: retry_on: connect-failure {% if host.get('host_rewrite', False) %} host_rewrite: "{{host['host_rewrite']}}" {% endif %} {% endfor %} http_filters: {% if mapping['name'] in ['dynamodb_iad', 'dynamodb_legacy'] -%} - name: envoy.filters.http.dynamo typed_config: {} {% endif -%} - name: envoy.filters.http.router typed_config: {} access_log: - name: envoy.access_loggers.file filter: or_filter: filters: - status_code_filter: comparison: op: GE value: default_value: 400 runtime_key: access_log.access_error.status - status_code_filter: comparison: op: EQ value: default_value: 0 runtime_key: access_log.access_error.status {% if mapping.get('log_high_latency_requests', True) %} - duration_filter: comparison: op: GE value: default_value: 2000 runtime_key: access_log.access_error.duration {% endif %} typed_config: "@type": type.googleapis.com/envoy.config.accesslog.v2.FileAccessLog path: "/var/log/envoy/egress_{{ mapping['name'] }}_http_error.log" {% if mapping.get('is_amzn_service', False) -%} {{ access_log_helper.egress_error_amazon_service()|indent(10) }} {% else -%} {{ access_log_helper.egress_error_log()|indent(10) }} {% endif %} {% if (mongos_servers|length > 0) or (mongos_servers|length == 0 and not loop.last ) %}{% endif -%} {% endfor -%} {% for key, value in mongos_servers.items() -%} - name : "{{ value['address'] }}" address: socket_address: address: "{{ value['address'] }}" protocol: TCP port_value: 9003 filter_chains: - filters: - name: envoy.filters.network.tcp_proxy typed_config: "@type": type.googleapis.com/envoy.config.filter.network.tcp_proxy.v2.TcpProxy stat_prefix: mongo_{{ key }} cluster: mongo_{{ key }} - name: envoy.filters.network.mongo_proxy typed_config: "@type": type.googleapis.com/envoy.config.filter.network.mongo_proxy.v2.MongoProxy stat_prefix: "{{ key }}" access_log: "/var/log/envoy/mongo_{{ key }}.log" {% if 
value.get('ratelimit', False) %} - name: envoy.filters.network.ratelimit typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.ratelimit.v3.RateLimit stat_prefix: "{{ key }}" domain: envoy_mongo_cps descriptors: entries: - key: database value: "{{ key }}" {% endif %} {% endfor -%} clusters: {% for service, options in internal_virtual_hosts.items() -%} - {{ helper.internal_cluster_definition(service, options)|indent(2)}} {% endfor -%} {% for mapping in external_virtual_hosts -%} {% for host in mapping['hosts'] -%} - name: egress_{{ host['name'] }} {% if host.get('ssl', False) %} transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext common_tls_context: validation_context: trusted_ca: filename: certs/cacert.pem {% if host.get('verify_subject_alt_name', False) %} match_subject_alt_names: exact: "{{host['verify_subject_alt_name'] }}" {% endif %} {% if host.get('sni', False) %} sni: "{{ host['sni'] }}" {% endif %} connect_timeout: 1s {% else %} connect_timeout: 0.25s {% endif %} type: LOGICAL_DNS lb_policy: ROUND_ROBIN load_assignment: cluster_name: egress_{{ host['name'] }} endpoints: - lb_endpoints: - endpoint: address: socket_address: address: {{ host['remote_address'] }} port_value: {{ host['port_value'] }} protocol: {{ host['protocol'] }} {% endfor -%} {% endfor -%} {% for key, value in mongos_servers.items() -%} - name: mongo_{{ key }} connect_timeout: 0.25s type: STRICT_DNS lb_policy: RANDOM load_assignment: cluster_name: mongo_{{ key }} endpoints: - lb_endpoints: {% for server in value['hosts'] -%} - endpoint: address: socket_address: address: {{ server['address'] }} port_value: {{ server['port_value'] }} protocol: {{ server['protocol'] }} {% endfor -%} {% endfor %} - name: main_website connect_timeout: 0.25s type: LOGICAL_DNS # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN load_assignment: cluster_name: 
main_website endpoints: - lb_endpoints: - endpoint: address: socket_address: address: main_website.com port_value: 443 protocol: TCP transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext sni: www.main_website.com - name: local_service connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN load_assignment: cluster_name: local_service endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 8080 protocol: TCP circuit_breakers: thresholds: max_pending_requests: 30 max_connections: 100 - name: local_service_grpc connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN http2_protocol_options: {} load_assignment: cluster_name: local_service_grpc endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 8081 protocol: TCP circuit_breakers: thresholds: max_requests: 200 dns_lookup_family: V4_ONLY - name: rds connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN http2_protocol_options: connection_keepalive: interval: 30s timeout: 5s load_assignment: cluster_name: rds endpoints: - lb_endpoints: - endpoint: address: socket_address: address: rds.yourcompany.net port_value: 80 protocol: TCP dns_lookup_family: V4_ONLY - name: statsd connect_timeout: 0.25s type: STATIC lb_policy: ROUND_ROBIN load_assignment: cluster_name: statsd endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 8125 protocol: TCP dns_lookup_family: V4_ONLY - name: lightstep_saas connect_timeout: 1s type: LOGICAL_DNS lb_policy: ROUND_ROBIN load_assignment: cluster_name: lightstep_saas endpoints: - lb_endpoints: - endpoint: address: socket_address: address: collector-grpc.lightstep.com port_value: 443 protocol: TCP http2_protocol_options: max_concurrent_streams: 100 transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext 
common_tls_context: validation_context: trusted_ca: filename: certs/cacert.pem match_subject_alt_names: exact: "collector-grpc.lightstep.com" - name: cds_cluster connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN load_assignment: cluster_name: cds_cluster endpoints: - lb_endpoints: - endpoint: address: socket_address: address: cds.yourcompany.net port_value: 80 protocol: TCP - name: sds connect_timeout: 0.25s type: STRICT_DNS lb_policy: ROUND_ROBIN load_assignment: cluster_name: sds endpoints: - lb_endpoints: - endpoint: address: socket_address: address: discovery.yourcompany.net port_value: 80 protocol: TCP dynamic_resources: cds_config: api_config_source: api_type: REST cluster_names: - cds_cluster refresh_delay: 30s cluster_manager: {} flags_path: "/etc/envoy/flags" stats_sinks: - name: envoy.stat_sinks.statsd typed_config: "@type": type.googleapis.com/envoy.config.metrics.v2.StatsdSink tcp_cluster_name: statsd layered_runtime: layers: - name: root disk_layer: symlink_root: /srv/configset/envoydata/current subdirectory: envoy - name: override disk_layer: symlink_root: /srv/configset/envoydata/current subdirectory: envoy_override append_service_cluster: true - name: admin admin_layer: {} admin: access_log_path: /var/log/envoy/admin_access.log address: socket_address: protocol: TCP address: 0.0.0.0 port_value: 9901 ================================================ FILE: configs/freebind/README.md ================================================ # Freebind testing To manually validate the `IP_FREEBIND` behavior in Envoy, you can launch Envoy with [freebind.yaml](freebind.yaml). The listener free bind behavior can be verified with: 1. `envoy -c ./configs/freebind/freebind.yaml -l trace` 2. `sudo ifconfig lo:1 192.168.42.1/30 up` 3. `nc -v -l 0.0.0.0 10001` To cleanup run `sudo ifconfig lo:1 down`. TODO(htuch): Steps to verify upstream behavior. 
================================================ FILE: configs/freebind/freebind.yaml ================================================ admin: access_log_path: /tmp/admin_access.log address: socket_address: address: 127.0.0.1 port_value: 9901 static_resources: listeners: - name: listener_0 address: socket_address: address: 192.168.42.1 port_value: 10000 freebind: true filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route virtual_hosts: - name: local_service domains: ["*"] routes: - match: { prefix: "/" } route: { cluster: service_local } http_filters: - name: envoy.filters.http.router clusters: - name: service_local connect_timeout: 30s type: STATIC lb_policy: ROUND_ROBIN load_assignment: cluster_name: service_local endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 10001 # TODO(htuch): Figure out how to do end-to-end testing with # outgoing connections and free bind. # upstream_bind_config: # source_address: # address: 192.168.43.1 # freebind: true ================================================ FILE: configs/google-vrp/envoy-edge.yaml ================================================ overload_manager: refresh_interval: 0.25s resource_monitors: - name: "envoy.resource_monitors.fixed_heap" typed_config: "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig # TODO: Tune for your system. 
max_heap_size_bytes: 1073741824 # 1 GiB actions: - name: "envoy.overload_actions.shrink_heap" triggers: - name: "envoy.resource_monitors.fixed_heap" threshold: value: 0.90 - name: "envoy.overload_actions.stop_accepting_requests" triggers: - name: "envoy.resource_monitors.fixed_heap" threshold: value: 0.95 static_resources: listeners: - name: listener_https address: socket_address: protocol: TCP address: 0.0.0.0 port_value: 10000 per_connection_buffer_limit_bytes: 32768 # 32 KiB filter_chains: - transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext common_tls_context: tls_certificates: - certificate_chain: { filename: "certs/servercert.pem" } private_key: { filename: "certs/serverkey.pem" } # Uncomment if Envoy is behind a load balancer that exposes client IP address using the PROXY protocol. # use_proxy_proto: true filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http use_remote_address: true common_http_protocol_options: idle_timeout: 3600s # 1 hour headers_with_underscores_action: REJECT_REQUEST http2_protocol_options: max_concurrent_streams: 100 initial_stream_window_size: 65536 # 64 KiB initial_connection_window_size: 1048576 # 1 MiB stream_idle_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests route_config: name: local_route virtual_hosts: - name: local_service domains: ["*"] # The exact route table is not super important in this example (this is the model # for the Google VRP scenario). 
routes: - match: prefix: "/content" route: cluster: service_foo idle_timeout: 15s # must be disabled for long-lived and streaming requests - match: prefix: "/" direct_response: status: 403 body: inline_string: "denied\n" http_filters: - name: envoy.filters.http.router clusters: - name: service_foo connect_timeout: 5s per_connection_buffer_limit_bytes: 32768 # 32 KiB load_assignment: cluster_name: service_foo endpoints: - lb_endpoints: - endpoint: address: socket_address: address: 127.0.0.1 port_value: 10002 http2_protocol_options: initial_stream_window_size: 65536 # 64 KiB initial_connection_window_size: 1048576 # 1 MiB ================================================ FILE: configs/google-vrp/envoy-origin.yaml ================================================ overload_manager: refresh_interval: 0.25s resource_monitors: - name: "envoy.resource_monitors.fixed_heap" typed_config: "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig max_heap_size_bytes: 1073741824 # 1 GiB actions: - name: "envoy.overload_actions.shrink_heap" triggers: - name: "envoy.resource_monitors.fixed_heap" threshold: value: 0.95 - name: "envoy.overload_actions.stop_accepting_requests" triggers: - name: "envoy.resource_monitors.fixed_heap" threshold: value: 0.98 static_resources: listeners: - name: listener_0 address: socket_address: protocol: TCP address: 0.0.0.0 port_value: 10002 per_connection_buffer_limit_bytes: 32768 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: ingress_http use_remote_address: true common_http_protocol_options: idle_timeout: 3600s # 1 hour headers_with_underscores_action: REJECT_REQUEST http2_protocol_options: max_concurrent_streams: 100 initial_stream_window_size: 65536 # 64 KiB initial_connection_window_size: 1048576 # 1 MiB stream_idle_timeout: 300s # 5 mins, must 
be disabled for long-lived and streaming requests request_timeout: 300s # 5 mins, must be disabled for long-lived and streaming requests route_config: name: local_route virtual_hosts: - name: local_service domains: ["*"] routes: - match: path: "/blockedz" direct_response: status: 200 body: inline_string: "hidden treasure\n" - match: prefix: "/" direct_response: status: 200 body: inline_string: "normal\n" http_filters: - name: envoy.filters.http.router ================================================ FILE: configs/google-vrp/launch_envoy.sh ================================================ #!/bin/bash cd /etc/envoy || exit envoy "$@" ================================================ FILE: configs/google-vrp/supervisor.conf ================================================ [supervisord] nodaemon=true [program:envoy-edge] command=launch_envoy.sh -c /etc/envoy/envoy-edge.yaml %(ENV_ENVOY_EDGE_EXTRA_ARGS)s --log-format "(edge)[%%Y-%%m-%%d %%T.%%e][%%t][%%l][%%n] %%v" --base-id 0 redirect_stderr=true stdout_logfile_maxbytes=0 stdout_logfile=/dev/stdout [program:envoy-origin] command=launch_envoy.sh -c /etc/envoy/envoy-origin.yaml %(ENV_ENVOY_ORIGIN_EXTRA_ARGS)s --log-format "(origin)[%%Y-%%m-%%d %%T.%%e][%%t][%%l][%%n] %%v" --base-id 1 redirect_stderr=true stdout_logfile_maxbytes=0 stdout_logfile=/dev/stdout ================================================ FILE: configs/google_com_proxy.v2.yaml ================================================ admin: access_log_path: /tmp/admin_access.log address: socket_address: protocol: TCP address: 127.0.0.1 port_value: 9901 static_resources: listeners: - name: listener_0 address: socket_address: protocol: TCP address: 0.0.0.0 port_value: 10000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route virtual_hosts: - name: local_service 
domains: ["*"] routes: - match: prefix: "/" route: host_rewrite: www.google.com cluster: service_google http_filters: - name: envoy.filters.http.router clusters: - name: service_google connect_timeout: 30s type: LOGICAL_DNS # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN load_assignment: cluster_name: service_google endpoints: - lb_endpoints: - endpoint: address: socket_address: address: www.google.com port_value: 443 transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext sni: www.google.com ================================================ FILE: configs/original-dst-cluster/README.md ================================================ # Original destination cluster configuration and testing An original destination cluster forwards requests to the same destination the request was going to before being redirected to Envoy using an iptables REDIRECT rule. `proxy_config.yaml` contains an example Envoy configuration demonstrating the use of an original destination cluster. `netns_setup.sh` and `netns_cleanup.sh` are provided as examples for setting up and cleaning up, respectively, a network namespace and the required iptables rule to redirect traffic to Envoy. # Setting up `netns_setup.sh` takes two arguments: the name of the new network namespace and the prefix that is to be redirected. Envoy listener port is set to 10000, which matches the configuration in `proxy_config.yaml`. 
This creates a network namespace `ns1` and redirects traffic from there to Envoy listening on port 10000 if the destination address of the traffic matches `173.194.222.0/24` : ``` sudo ./configs/original-dst-cluster/netns_setup.sh ns1 173.194.222.0/24 ``` # Building and running Envoy Build Envoy with debug options, so that the behavior can be better observed from the logs: ``` bazel build //source/exe:envoy-static -c dbg ``` Then you should run Envoy with the provided example configuration: ``` bazel-out/local-dbg/bin/source/exe/envoy-static -c configs/original-dst-cluster/proxy_config.yaml -l debug ``` When running you should see periodic messages like `Cleaning up stale original dst hosts.` # Generating traffic Next we generate traffic from the new network namespace hitting the redirect rule. Run this from another terminal: ``` sudo ip netns exec ns1 curl -v 173.194.222.106:80 ``` Most likely you'll see `301 Moved` in the curl response. In the rare case of upstream connection timeout you'll see `503 Service Unavailable` instead. The connection timeout setting in proxy_config.yaml is set to 6 seconds to make this less likely, but if no host with the destination address exists, then you will get this response no matter how long the timeout setting. You should see lines with `Adding host 173.194.222.106:80` being logged by each Envoy thread, followed by `Keeping active host 173.194.222.106:80` and eventually `Removing stale host 173.194.222.106:80`, again multiple times, once from each Envoy thread. # Cleaning up To properly remove the added network namespace and the iptables configuration run `netns_cleanup.sh` with the same arguments as the setup before: ``` sudo ./configs/original-dst-cluster/netns_cleanup.sh ns1 173.194.222.0/24 ``` Finally, stop Envoy with `^C`. 
================================================ FILE: configs/original-dst-cluster/netns_cleanup.sh ================================================ #!/usr/bin/env bash # # Cleanup network namespace after testing Envoy original_dst cluster # NETNS=$1 TARGET_IP=$2 ENVOY_PORT=10000 # remove iptables rule iptables -t nat -D PREROUTING --src 0/0 --dst "$TARGET_IP" -p tcp --dport 80 -j REDIRECT --to-ports "$ENVOY_PORT" # delete network namespace ip netns delete "$NETNS" # delete veth pair ip link del "$NETNS-veth0" type veth peer name "$NETNS-veth1" ================================================ FILE: configs/original-dst-cluster/netns_setup.sh ================================================ #!/usr/bin/env bash # # Example setup network namespace for testing Envoy original_dst cluster # Clean up with the cleanup script with the same arguments. # # Test with: # $sudo ip netns exec ${NETNS} curl -v ${TARGET_IP}:80 # set -e # name of the network namespace NETNS=$1 # IP address or prefix that will be redirected TARGET_IP=$2 # Local Envoy Listener port number ENVOY_PORT=10000 # Create veth pair ip link add "$NETNS-veth0" type veth peer name "$NETNS-veth1" ifconfig "$NETNS-veth0" 10.0.200.2/24 up # Create network namespace ip netns add "$NETNS" # Move veth peer to the namespace ip link set "$NETNS-veth1" netns "$NETNS" # Configure network namespace ip netns exec "$NETNS" ifconfig lo 127.0.0.1 up ip netns exec "$NETNS" ifconfig "$NETNS-veth1" 10.0.200.1/24 up ip netns exec "$NETNS" ip route add default via 10.0.200.2 #configure iptables REDIRECT in the PREROUTING hook of the root name space nat table. 
iptables -t nat -I PREROUTING --src 0/0 --dst "$TARGET_IP" -p tcp --dport 80 -j REDIRECT --to-ports "$ENVOY_PORT" ================================================ FILE: configs/original-dst-cluster/proxy_config.yaml ================================================ static_resources: listeners: - address: socket_address: address: 0.0.0.0 port_value: 10000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_service virtual_hosts: - name: backend domains: - "*" routes: - match: prefix: "/" route: cluster: cluster1 http_filters: - name: envoy.filters.http.router typed_config: {} codec_type: auto listener_filters: - name: envoy.filters.listener.original_dst typed_config: {} clusters: - name: cluster1 type: ORIGINAL_DST connect_timeout: 6s lb_policy: CLUSTER_PROVIDED dns_lookup_family: V4_ONLY cluster_manager: {} admin: access_log_path: /tmp/admin_access.log address: socket_address: address: 127.0.0.1 port_value: 9901 ================================================ FILE: configs/requirements.txt ================================================ Jinja2==2.11.2 \ --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \ --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035 MarkupSafe==1.1.1 \ --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \ --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \ --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \ --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \ --hash=sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42 \ --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \ 
--hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \ --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \ --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \ --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \ --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \ --hash=sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b \ --hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \ --hash=sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15 \ --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \ --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \ --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \ --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \ --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \ --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \ --hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \ --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \ --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \ --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \ --hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \ --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \ --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \ --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \ --hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \ --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \ 
--hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \ --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \ --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be ================================================ FILE: configs/routing_helper_v2.template.yaml ================================================ {%- macro make_route_internal(cluster, options) %} cluster: {{ cluster }} {%- if 'timeout' in options -%} timeout: {{ options['timeout'] }}, {% endif %} retry_policy: retry_on: 5xx {%- endmacro %} {%- macro make_route(cluster) -%} {{ make_route_internal(cluster, clusters.get(cluster, {})) }} {%- endmacro -%} {%- macro internal_cluster_definition(service, options) -%} name: {{ service }} connect_timeout: 0.250s type: EDS eds_cluster_config: eds_config: api_config_source: api_type: REST cluster_names: - sds refresh_delay: 30s service_name: {{ service }} lb_policy: LEAST_REQUEST {% if 'max_requests' in options -%} circuit_breakers: thresholds: - priority: DEFAULT max_requests: {{ options['max_requests'] }} {% endif -%} health_checks: - http_health_check: path: /healthcheck service_name_matcher: prefix: accidents timeout: 2s interval: 5s interval_jitter: 5s unhealthy_threshold: 2 healthy_threshold: 2 outlier_detection: success_rate_stdev_factor: 1900 http2_protocol_options: {} {% endmacro -%} ================================================ FILE: configs/terminate_connect.v3.yaml ================================================ admin: access_log_path: /tmp/admin_access.log address: socket_address: protocol: TCP address: 127.0.0.1 port_value: 9902 static_resources: listeners: - name: listener_0 address: socket_address: protocol: TCP address: 127.0.0.1 port_value: 10001 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager stat_prefix: 
ingress_http route_config: name: local_route virtual_hosts: - name: local_service domains: - "*" routes: - match: connect_matcher: {} route: cluster: service_google upgrade_configs: - upgrade_type: CONNECT connect_config: {} http_filters: - name: envoy.filters.http.router http2_protocol_options: allow_connect: true upgrade_configs: - upgrade_type: CONNECT clusters: - name: service_google connect_timeout: 0.25s type: LOGICAL_DNS # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN load_assignment: cluster_name: service_google endpoints: - lb_endpoints: - endpoint: address: socket_address: address: www.google.com port_value: 443 transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext sni: www.google.com ================================================ FILE: configs/using_deprecated_config.v2.yaml ================================================ admin: access_log_path: /tmp/admin_access.log address: socket_address: protocol: TCP address: 127.0.0.1 port_value: 9901 static_resources: listeners: - name: listener_0 address: socket_address: protocol: TCP address: 0.0.0.0 port_value: 10000 filter_chains: - filters: - name: envoy.filters.network.http_connection_manager typed_config: "@type": type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2.HttpConnectionManager stat_prefix: ingress_http route_config: name: local_route virtual_hosts: - name: local_service domains: ["*"] routes: - match: prefix: "/" route: host_rewrite: www.google.com cluster: service_google cors: allow_origin: - "test-origin-1" http_filters: - name: envoy.filters.http.router clusters: - name: service_google connect_timeout: 0.25s type: LOGICAL_DNS # Comment out the following line to test on v6 networks dns_lookup_family: V4_ONLY lb_policy: ROUND_ROBIN load_assignment: cluster_name: service_google endpoints: - lb_endpoints: - 
endpoint: address: socket_address: address: www.google.com port_value: 443 transport_socket: name: envoy.transport_sockets.tls typed_config: "@type": type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext sni: www.google.com tracing: http: name: envoy.tracers.zipkin config: collector_cluster: service_google collector_endpoint: /api/v1/spans collector_endpoint_version: HTTP_JSON_V1 layered_runtime: layers: - name: static_layer static_layer: envoy.deprecated_features:envoy.config.trace.v2.ZipkinConfig.HTTP_JSON_V1: true envoy.deprecated_features:envoy.api.v2.route.CorsPolicy.allow_origin: true ================================================ FILE: docs/BUILD ================================================ load( "//bazel:envoy_build_system.bzl", "envoy_package", ) licenses(["notice"]) # Apache 2 exports_files(["protodoc_manifest.yaml"]) envoy_package() # TODO(phlax): fix failing/excluded configs # the following config only fails on windows: # dns-cache-circuit-breaker: "Error: unable to read file: /etc/ssl/certs/ca-certificates.crt" filegroup( name = "configs", srcs = glob( ["root/**/*.yaml"], exclude = [ "root/intro/_include/life-of-a-request.yaml", "root/intro/arch_overview/security/_include/ssl.yaml", "root/configuration/http/http_filters/_include/dns-cache-circuit-breaker.yaml", "root/configuration/http/http_filters/_include/grpc-reverse-bridge-filter.yaml", "root/configuration/http/http_filters/_include/grpc-transcoder-filter.yaml", ], ), ) ================================================ FILE: docs/README.md ================================================ # Building documentation locally There are two methods to build the documentation, described below. In both cases, the generated output can be found in `generated/docs`. 
## Building in an existing Envoy development environment If you have an [existing Envoy development environment](https://github.com/envoyproxy/envoy/tree/master/bazel#quick-start-bazel-build-for-developers), you should have the necessary dependencies and requirements and be able to build the documentation directly. ```bash ./docs/build.sh ``` By default configuration examples are going to be validated during build. To disable validation, set `SPHINX_SKIP_CONFIG_VALIDATION` environment variable to `true`: ```bash SPHINX_SKIP_CONFIG_VALIDATION=true docs/build.sh ``` ## Using the Docker build container to build the documentation If you *do not* have an existing development environment, you may wish to use the Docker build image that is used in continuous integration. This can be done as follows: ``` ./ci/run_envoy_docker.sh 'docs/build.sh' ``` To use this method you will need a minimum of 4-5GB of disk space available to accommodate the build image. # Creating a Pull Request with documentation changes When you create a Pull Request the documentation is rendered by CircleCI. If you are logged in to CircleCI (it is possible to authenticate using your Github account), you can view the rendered changes. To do this: - click `Details` in the `ci/circleci: docs` check at the bottom of the Pull Request. - click `ARTIFACTS` in the CircleCI dashboard - browse to the documentation root at `generated/docs/index.html`. # How the Envoy website and docs are updated 1. The docs are published to [docs/envoy/latest](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy/latest) on every commit to master. This process is handled by CircleCI with the [`publish.sh`](https://github.com/envoyproxy/envoy/blob/master/docs/publish.sh) script. 2. The docs are published to [docs/envoy](https://github.com/envoyproxy/envoyproxy.github.io/tree/master/docs/envoy) in a directory named after every tagged commit in this repo. Thus, on every tagged release there are snapped docs. 
================================================ FILE: docs/_ext/validating_code_block.py ================================================ from typing import List from docutils import nodes from docutils.parsers.rst import Directive from docutils.parsers.rst import directives from sphinx.application import Sphinx from sphinx.util.docutils import SphinxDirective from sphinx.directives.code import CodeBlock from sphinx.errors import ExtensionError import os import subprocess class ValidatingCodeBlock(CodeBlock): """A directive that provides protobuf yaml formatting and validation. 'type-name' option is required and expected to conain full Envoy API type. An ExtensionError is raised on validation failure. Validation will be skipped if SPHINX_SKIP_CONFIG_VALIDATION environment variable is set. """ has_content = True required_arguments = CodeBlock.required_arguments optional_arguments = CodeBlock.optional_arguments final_argument_whitespace = CodeBlock.final_argument_whitespace option_spec = { 'type-name': directives.unchanged, } option_spec.update(CodeBlock.option_spec) skip_validation = (os.getenv('SPHINX_SKIP_CONFIG_VALIDATION') or 'false').lower() == 'true' def run(self): source, line = self.state_machine.get_source_and_line(self.lineno) # built-in directives.unchanged_required option validator produces a confusing error message if self.options.get('type-name') == None: raise ExtensionError("Expected type name in: {0} line: {1}".format(source, line)) if not ValidatingCodeBlock.skip_validation: args = [ 'bazel-bin/tools/config_validation/validate_fragment', self.options.get('type-name'), '-s', '\n'.join(self.content) ] completed = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf-8') if completed.returncode != 0: raise ExtensionError( "Failed config validation for type: '{0}' in: {1} line: {2}:\n {3}".format( self.options.get('type-name'), source, line, completed.stderr)) self.options.pop('type-name', None) return 
list(CodeBlock.run(self)) def setup(app): app.add_directive("validated-code-block", ValidatingCodeBlock) return { 'version': '0.1', 'parallel_read_safe': True, 'parallel_write_safe': True, } ================================================ FILE: docs/build.sh ================================================ #!/usr/bin/env bash # set SPHINX_SKIP_CONFIG_VALIDATION environment variable to true to skip # validation of configuration examples . tools/shell_utils.sh set -e # We need to set ENVOY_DOCS_VERSION_STRING and ENVOY_DOCS_RELEASE_LEVEL for Sphinx. # We also validate that the tag and version match at this point if needed. if [ -n "$CIRCLE_TAG" ] then # Check the git tag matches the version number in the VERSION file. VERSION_NUMBER=$(cat VERSION) if [ "v${VERSION_NUMBER}" != "${CIRCLE_TAG}" ]; then echo "Given git tag does not match the VERSION file content:" echo "${CIRCLE_TAG} vs $(cat VERSION)" exit 1 fi # Check the version_history.rst contains current release version. grep --fixed-strings "$VERSION_NUMBER" docs/root/version_history/current.rst \ || (echo "Git tag not found in version_history/current.rst" && exit 1) # Now that we know there is a match, we can use the tag. 
export ENVOY_DOCS_VERSION_STRING="tag-$CIRCLE_TAG" export ENVOY_DOCS_RELEASE_LEVEL=tagged export ENVOY_BLOB_SHA="$CIRCLE_TAG" else BUILD_SHA=$(git rev-parse HEAD) VERSION_NUM=$(cat VERSION) export ENVOY_DOCS_VERSION_STRING="${VERSION_NUM}"-"${BUILD_SHA:0:6}" export ENVOY_DOCS_RELEASE_LEVEL=pre-release export ENVOY_BLOB_SHA="$BUILD_SHA" fi SCRIPT_DIR="$(dirname "$0")" SRC_DIR="$(dirname "$SCRIPT_DIR")" API_DIR="${SRC_DIR}"/api CONFIGS_DIR="${SRC_DIR}"/configs BUILD_DIR=build_docs [[ -z "${DOCS_OUTPUT_DIR}" ]] && DOCS_OUTPUT_DIR=generated/docs [[ -z "${GENERATED_RST_DIR}" ]] && GENERATED_RST_DIR=generated/rst rm -rf "${DOCS_OUTPUT_DIR}" mkdir -p "${DOCS_OUTPUT_DIR}" rm -rf "${GENERATED_RST_DIR}" mkdir -p "${GENERATED_RST_DIR}" source_venv "$BUILD_DIR" pip3 install --require-hashes -r "${SCRIPT_DIR}"/requirements.txt # Clean up any stale files in the API tree output. Bazel remembers valid cached # files still. rm -rf bazel-bin/external/envoy_api_canonical EXTENSION_DB_PATH="$(realpath "${BUILD_DIR}/extension_db.json")" export EXTENSION_DB_PATH # This is for local RBE setup, should be no-op for builds without RBE setting in bazelrc files. IFS=" " read -ra BAZEL_BUILD_OPTIONS <<< "${BAZEL_BUILD_OPTIONS:-}" BAZEL_BUILD_OPTIONS+=( "--remote_download_outputs=all" "--strategy=protodoc=sandboxed,local" "--action_env=ENVOY_BLOB_SHA" "--action_env=EXTENSION_DB_PATH") # Generate extension database. This maps from extension name to extension # metadata, based on the envoy_cc_extension() Bazel target attributes. ./docs/generate_extension_db.py "${EXTENSION_DB_PATH}" # Generate RST for the lists of trusted/untrusted extensions in # intro/arch_overview/security docs. mkdir -p "${GENERATED_RST_DIR}"/intro/arch_overview/security ./docs/generate_extension_rst.py "${EXTENSION_DB_PATH}" "${GENERATED_RST_DIR}"/intro/arch_overview/security # Generate RST for external dependency docs in intro/arch_overview/security. 
./docs/generate_external_dep_rst.py "${GENERATED_RST_DIR}"/intro/arch_overview/security function generate_api_rst() { local proto_target declare -r API_VERSION=$1 echo "Generating ${API_VERSION} API RST..." # Generate the extensions docs bazel build "${BAZEL_BUILD_OPTIONS[@]}" @envoy_api_canonical//:"${API_VERSION}"_protos --aspects \ tools/protodoc/protodoc.bzl%protodoc_aspect --output_groups=rst # Fill in boiler plate for extensions that have google.protobuf.Empty as their # config. bazel run "${BAZEL_BUILD_OPTIONS[@]}" //tools/protodoc:generate_empty \ "${PWD}"/docs/empty_extensions.json "${PWD}/${GENERATED_RST_DIR}/api-${API_VERSION}"/config # We do ** matching below to deal with Bazel cache blah (source proto artifacts # are nested inside source package targets). shopt -s globstar # Find all source protos. proto_target=$(bazel query "labels(srcs, labels(deps, @envoy_api_canonical//:${API_VERSION}_protos))") declare -r proto_target # Only copy in the protos we care about and know how to deal with in protodoc. for p in ${proto_target} do declare PROTO_FILE_WITHOUT_PREFIX="${p#@envoy_api_canonical//}" declare PROTO_FILE_CANONICAL="${PROTO_FILE_WITHOUT_PREFIX/://}" # We use ** glob matching here to deal with the fact that we have something # like # bazel-bin/external/envoy_api_canonical/envoy/admin/v2alpha/pkg/envoy/admin/v2alpha/certs.proto.proto # and we don't want to have to do a nested loop and slow bazel query to # recover the canonical package part of the path. declare SRCS=(bazel-bin/external/envoy_api_canonical/**/"${PROTO_FILE_CANONICAL}.rst") # While we may have reformatted the file multiple times due to the transitive # dependencies in the aspect above, they all look the same. So, just pick an # arbitrary match and we're done. 
declare SRC="${SRCS[0]}" declare DST="${GENERATED_RST_DIR}/api-${API_VERSION}/${PROTO_FILE_CANONICAL#envoy/}".rst mkdir -p "$(dirname "${DST}")" cp -f "${SRC}" "$(dirname "${DST}")" done } generate_api_rst v2 generate_api_rst v3 # Fixup anchors and references in v3 so they form a distinct namespace. # TODO(htuch): Do this in protodoc generation in the future. find "${GENERATED_RST_DIR}"/api-v3 -name "*.rst" -print0 | xargs -0 sed -i -e "s#envoy_api_#envoy_v3_api_#g" find "${GENERATED_RST_DIR}"/api-v3 -name "*.rst" -print0 | xargs -0 sed -i -e "s#config_resource_monitors#v3_config_resource_monitors#g" # xDS protocol spec. mkdir -p ${GENERATED_RST_DIR}/api-docs cp -f "${API_DIR}"/xds_protocol.rst "${GENERATED_RST_DIR}/api-docs/xds_protocol.rst" # Edge hardening example YAML. mkdir -p "${GENERATED_RST_DIR}"/configuration/best_practices cp -f "${CONFIGS_DIR}"/google-vrp/envoy-edge.yaml "${GENERATED_RST_DIR}"/configuration/best_practices rsync -rav "${API_DIR}/diagrams" "${GENERATED_RST_DIR}/api-docs" rsync -av "${SCRIPT_DIR}"/root/ "${SCRIPT_DIR}"/conf.py "${SCRIPT_DIR}"/_ext "${GENERATED_RST_DIR}" # To speed up validate_fragment invocations in validating_code_block bazel build "${BAZEL_BUILD_OPTIONS[@]}" //tools/config_validation:validate_fragment sphinx-build -W --keep-going -b html "${GENERATED_RST_DIR}" "${DOCS_OUTPUT_DIR}" ================================================ FILE: docs/conf.py ================================================ # -*- coding: utf-8 -*- # # envoy documentation build configuration file, created by # sphinx-quickstart on Sat May 28 10:51:27 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. 
from datetime import datetime
import os
from sphinx.directives.code import CodeBlock
import sphinx_rtd_theme
import sys


# https://stackoverflow.com/questions/44761197/how-to-use-substitution-definitions-with-code-blocks
class SubstitutionCodeBlock(CodeBlock):
  """
  Similar to CodeBlock but replaces placeholders with variables. See "substitutions" below.

  The substitution pairs come from the Sphinx config value ``substitutions``
  (registered in :func:`setup`), each pair being ``(placeholder, replacement)``.
  """

  def run(self):
    """ Replace placeholders with given variables. """
    app = self.state.document.settings.env.app
    new_content = []
    existing_content = self.content
    # Apply every configured (placeholder, replacement) pair to each content
    # line before delegating to the stock CodeBlock rendering.
    for item in existing_content:
      for pair in app.config.substitutions:
        original, replacement = pair
        item = item.replace(original, replacement)
      new_content.append(item)

    self.content = new_content
    return list(CodeBlock.run(self))


def setup(app):
  # Sphinx extension hook: register the config values and the
  # substitution-code-block directive used throughout the docs.
  app.add_config_value('release_level', '', 'env')
  app.add_config_value('substitutions', [], 'html')
  app.add_directive('substitution-code-block', SubstitutionCodeBlock)


# Fail fast at import time: docs/build.sh is expected to export these
# before invoking Sphinx. ENVOY_BLOB_SHA is read unguarded below, so a
# missing variable raises KeyError rather than this friendlier message.
if not os.environ.get('ENVOY_DOCS_RELEASE_LEVEL'):
  raise Exception("ENVOY_DOCS_RELEASE_LEVEL env var must be defined")
release_level = os.environ['ENVOY_DOCS_RELEASE_LEVEL']
blob_sha = os.environ['ENVOY_BLOB_SHA']

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.append(os.path.abspath("./_ext")) extensions = [ 'sphinxcontrib.httpdomain', 'sphinx.ext.extlinks', 'sphinx.ext.ifconfig', 'sphinx_tabs.tabs', 'sphinx_copybutton', 'validating_code_block' ] extlinks = { 'repo': ('https://github.com/envoyproxy/envoy/blob/{}/%s'.format(blob_sha), ''), 'api': ('https://github.com/envoyproxy/envoy/blob/{}/api/%s'.format(blob_sha), ''), } # Setup global substitutions if 'pre-release' in release_level: substitutions = [('|envoy_docker_image|', 'envoy-dev:{}'.format(blob_sha))] else: substitutions = [('|envoy_docker_image|', 'envoy:{}'.format(blob_sha))] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] copybutton_prompt_text = r"\$ |PS>" copybutton_prompt_is_regexp = True # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'envoy' copyright = u'2016-{}, Envoy Project Authors'.format(datetime.now().year) author = u'Envoy Project Authors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. if not os.environ.get('ENVOY_DOCS_VERSION_STRING'): raise Exception("ENVOY_DOCS_VERSION_STRING env var must be defined") # The short X.Y version. version = os.environ['ENVOY_DOCS_VERSION_STRING'] # The full version, including alpha/beta/rc tags. release = os.environ['ENVOY_DOCS_VERSION_STRING'] # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [ '_build', '_venv', 'Thumbs.db', '.DS_Store', 'api-v2/api/v2/endpoint/load_report.proto.rst', 'api-v2/service/discovery/v2/hds.proto.rst', ] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. #pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'logo_only': True, 'includehidden': False, } # Add any paths that contain custom themes here, relative to this directory. 
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. # " v documentation" by default. #html_title = u'envoy v1.0.0' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = '_static/img/envoy-logo.png' # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'favicon.ico' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_style = 'css/envoy.css' # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. #html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. 
#html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. 
htmlhelp_basename = 'envoydoc' ================================================ FILE: docs/empty_extensions.json ================================================ { "envoy.filters.http.cors": { "title": "CORS processing", "path": "filter/http/cors", "description": "https://en.wikipedia.org/wiki/Cross-origin_resource_sharing", "ref": "config_http_filters_cors" }, "envoy.filters.http.dynamo": { "title": "AWS DynamoDB", "path": "filter/http/dynamo", "description": "https://aws.amazon.com/dynamodb/", "ref": "config_http_filters_dynamo" }, "envoy.filters.http.grpc_http1_bridge": { "title": "gRPC HTTP/1 bridge", "path": "filter/http/grpc_http1_bridge", "description": "HTTP filter that bridges HTTP/1.1 unary gRPC to compliant HTTP/2 gRPC", "ref": "config_http_filters_grpc_bridge" }, "envoy.filters.http.grpc_web": { "title": "gRPC Web", "path": "filter/http/grpc_web", "description": "https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-WEB.md", "ref": "config_http_filters_grpc_web" }, "envoy.filters.listener.http_inspector": { "title": "HTTP Inspector", "path": "filter/listener/http_inspector", "ref": "config_listener_filters_http_inspector" }, "envoy.filters.listener.original_dst": { "title": "Original Destination", "path": "filter/listener/original_dst", "ref": "config_listener_filters_original_dst" }, "envoy.filters.listener.proxy_protocol": { "title": "Proxy Protocol", "path": "filter/listener/proxy_protocol", "ref": "config_listener_filters_proxy_protocol" }, "envoy.filters.listener.tls_inspector": { "title": "TLS Inspector", "path": "filter/listener/tls_inspector", "ref": "config_listener_filters_tls_inspector" }, "envoy.filters.network.echo": { "title": "Echo", "path": "filter/network/echo", "ref": "config_network_filters_echo" }, "envoy.filters.network.sni_cluster": { "title": "SNI Cluster", "path": "filter/network/sni_cluster", "ref": "config_network_filters_sni_cluster" }, "envoy.retry_host_predicates.previous_hosts": { "title": "Previous Hosts", "path": 
"retry/previous_hosts", "ref": "arch_overview_http_retry_plugins" }, "envoy.retry_host_predicates.omit_canary_hosts": { "title": "Omit Canary Hosts", "path": "retry/omit_canary_hosts", "ref": "arch_overview_http_retry_plugins" } } ================================================ FILE: docs/generate_extension_db.py ================================================ #!/usr/bin/env python3 # Generate an extension database, a JSON file mapping from qualified well known # extension name to metadata derived from the envoy_cc_extension target. import json import os import pathlib import shutil import subprocess import sys from importlib.util import spec_from_loader, module_from_spec from importlib.machinery import SourceFileLoader BUILDOZER_PATH = os.getenv("BUILDOZER_BIN") or (os.path.expandvars("$GOPATH/bin/buildozer") if os.getenv("GOPATH") else shutil.which("buildozer")) # source/extensions/extensions_build_config.bzl must have a .bzl suffix for Starlark # import, so we are forced to do this workaround. _extensions_build_config_spec = spec_from_loader( 'extensions_build_config', SourceFileLoader('extensions_build_config', 'source/extensions/extensions_build_config.bzl')) extensions_build_config = module_from_spec(_extensions_build_config_spec) _extensions_build_config_spec.loader.exec_module(extensions_build_config) class ExtensionDbError(Exception): pass def IsMissing(value): return value == '(missing)' def GetExtensionMetadata(target): r = subprocess.run( [BUILDOZER_PATH, '-stdout', 'print security_posture status undocumented', target], stdout=subprocess.PIPE, stderr=subprocess.PIPE) security_posture, status, undocumented = r.stdout.decode('utf-8').strip().split(' ') if IsMissing(security_posture): raise ExtensionDbError( 'Missing security posture for %s. 
Please make sure the target is an envoy_cc_extension and security_posture is set' % target) return { 'security_posture': security_posture, 'undocumented': False if IsMissing(undocumented) else bool(undocumented), 'status': 'stable' if IsMissing(status) else status, } if __name__ == '__main__': output_path = sys.argv[1] extension_db = {} for extension, target in extensions_build_config.EXTENSIONS.items(): extension_db[extension] = GetExtensionMetadata(target) # The TLS and generic upstream extensions are hard-coded into the build, so # not in source/extensions/extensions_build_config.bzl extension_db['envoy.transport_sockets.tls'] = GetExtensionMetadata( '//source/extensions/transport_sockets/tls:config') extension_db['envoy.upstreams.http.generic'] = GetExtensionMetadata( '//source/extensions/upstreams/http/generic:config') pathlib.Path(output_path).write_text(json.dumps(extension_db)) ================================================ FILE: docs/generate_extension_rst.py ================================================ #!/usr/bin/env python3 # Generate RST lists of extensions grouped by their security posture. 
from collections import defaultdict import json import pathlib import sys def FormatItem(extension, metadata): if metadata['undocumented']: item = '* %s' % extension else: item = '* :ref:`%s `' % (extension, extension) if metadata['status'] == 'alpha': item += ' (alpha)' return item if __name__ == '__main__': extension_db_path = sys.argv[1] security_rst_root = sys.argv[2] extension_db = json.loads(pathlib.Path(extension_db_path).read_text()) security_postures = defaultdict(list) for extension, metadata in extension_db.items(): security_postures[metadata['security_posture']].append(extension) for sp, extensions in security_postures.items(): output_path = pathlib.Path(security_rst_root, 'secpos_%s.rst' % sp) content = '\n'.join( FormatItem(extension, extension_db[extension]) for extension in sorted(extensions) if extension_db[extension]['status'] != 'wip') output_path.write_text(content) ================================================ FILE: docs/generate_external_dep_rst.py ================================================ #!/usr/bin/env python3 # Generate RST lists of external dependencies. from collections import defaultdict, namedtuple import pathlib import sys import urllib.parse from importlib.util import spec_from_loader, module_from_spec from importlib.machinery import SourceFileLoader # bazel/repository_locations.bzl must have a .bzl suffix for Starlark import, so # we are forced to do this workaround. _repository_locations_spec = spec_from_loader( 'repository_locations', SourceFileLoader('repository_locations', 'bazel/repository_locations.bzl')) repository_locations = module_from_spec(_repository_locations_spec) _repository_locations_spec.loader.exec_module(repository_locations) # Render a CSV table given a list of table headers, widths and list of rows # (each a list of strings). def CsvTable(headers, widths, rows): csv_rows = '\n '.join(', '.join(row) for row in rows) return f'''.. 
csv-table:: :header: {', '.join(headers)} :widths: {', '.join(str(w) for w in widths) } {csv_rows} ''' # Anonymous external RST link for a given URL. def RstLink(text, url): return f'`{text} <{url}>`__' # NIST CPE database search URL for a given CPE. def NistCpeUrl(cpe): encoded_cpe = urllib.parse.quote(cpe) return 'https://nvd.nist.gov/products/cpe/search/results?keyword=%s&status=FINAL&orderBy=CPEURI&namingFormat=2.3' % encoded_cpe # Render version strings human readable. def RenderVersion(version): # Heuristic, almost certainly a git SHA if len(version) == 40: # Abbreviate git SHA return version[:7] return version def RenderTitle(title): underline = '~' * len(title) return f'\n{title}\n{underline}\n\n' if __name__ == '__main__': security_rst_root = sys.argv[1] Dep = namedtuple('Dep', ['name', 'sort_name', 'version', 'cpe', 'last_updated']) use_categories = defaultdict(lambda: defaultdict(list)) # Bin rendered dependencies into per-use category lists. for k, v in repository_locations.DEPENDENCY_REPOSITORIES.items(): cpe = v.get('cpe', '') if cpe == 'N/A': cpe = '' if cpe: cpe = RstLink(cpe, NistCpeUrl(cpe)) project_name = v['project_name'] project_url = v['project_url'] name = RstLink(project_name, project_url) version = RstLink(RenderVersion(v['version']), v['urls'][0]) last_updated = v['last_updated'] dep = Dep(name, project_name.lower(), version, cpe, last_updated) for category in v['use_category']: for ext in v.get('extensions', ['core']): use_categories[category][ext].append(dep) def CsvRow(dep): return [dep.name, dep.version, dep.last_updated, dep.cpe] # Generate per-use category RST with CSV tables. 
for category, exts in use_categories.items(): content = '' for ext_name, deps in sorted(exts.items()): if ext_name != 'core': content += RenderTitle(ext_name) output_path = pathlib.Path(security_rst_root, f'external_dep_{category}.rst') content += CsvTable(['Name', 'Version', 'Last updated', 'CPE'], [2, 1, 1, 2], [CsvRow(dep) for dep in sorted(deps, key=lambda d: d.sort_name)]) output_path.write_text(content) ================================================ FILE: docs/protodoc_manifest.yaml ================================================ fields: envoy.config.bootstrap.v3.Bootstrap.overload_manager: edge_config: example: refresh_interval: 0.25s resource_monitors: - name: "envoy.resource_monitors.fixed_heap" typed_config: "@type": type.googleapis.com/envoy.config.resource_monitor.fixed_heap.v2alpha.FixedHeapConfig max_heap_size_bytes: 1073741824 actions: - name: "envoy.overload_actions.shrink_heap" triggers: - name: "envoy.resource_monitors.fixed_heap" threshold: value: 0.90 - name: "envoy.overload_actions.stop_accepting_requests" triggers: - name: "envoy.resource_monitors.fixed_heap" threshold: value: 0.95 envoy.config.cluster.v3.Cluster.per_connection_buffer_limit_bytes: edge_config: { example: 32768 } envoy.config.cluster.v3.Cluster.http2_protocol_options: edge_config: example: initial_stream_window_size: 65536 # 64 KiB initial_connection_window_size: 1048576 # 1 MiB envoy.config.listener.v3.Listener.per_connection_buffer_limit_bytes: edge_config: { example: 32768 } envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.common_http_protocol_options: edge_config: example: idle_timeout: 900s # 15 mins headers_with_underscores_action: REJECT_REQUEST envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.http2_protocol_options: edge_config: example: max_concurrent_streams: 100 initial_stream_window_size: 65536 # 64 KiB initial_connection_window_size: 1048576 # 1 MiB 
envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.stream_idle_timeout: edge_config: example: 300s # 5 mins envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.request_timeout: edge_config: note: > This timeout is not compatible with streaming requests. example: 300s # 5 mins envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager.use_remote_address: edge_config: { example: true } ================================================ FILE: docs/publish.sh ================================================ #!/bin/bash # This is run on every commit that CircleCI picks up. It assumes that docs have already been built # via docs/build.sh. The push behavior differs depending on the nature of the commit: # * Tag commit (e.g. v1.6.0): pushes docs to versioned location, e.g. # https://www.envoyproxy.io/docs/envoy/v1.6.0/. # * Master commit: pushes docs to https://www.envoyproxy.io/docs/envoy/latest/. # * Otherwise: noop. set -e DOCS_DIR=generated/docs CHECKOUT_DIR=../envoy-docs BUILD_SHA=$(git rev-parse HEAD) if [ -n "$CIRCLE_TAG" ] then PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/"$CIRCLE_TAG" elif [ -z "$CIRCLE_PULL_REQUEST" ] && [ "$CIRCLE_BRANCH" == "master" ] then PUBLISH_DIR="$CHECKOUT_DIR"/docs/envoy/latest else echo "Ignoring docs push" exit 0 fi echo 'cloning' git clone git@github.com:envoyproxy/envoyproxy.github.io "$CHECKOUT_DIR" git -C "$CHECKOUT_DIR" fetch git -C "$CHECKOUT_DIR" checkout -B master origin/master rm -fr "$PUBLISH_DIR" mkdir -p "$PUBLISH_DIR" cp -r "$DOCS_DIR"/* "$PUBLISH_DIR" cd "$CHECKOUT_DIR" git config user.name "envoy-docs(travis)" git config user.email envoy-docs@users.noreply.github.com echo 'add' git add . 
echo 'commit' git commit -m "docs envoy@$BUILD_SHA" echo 'push' git push origin master ================================================ FILE: docs/requirements.txt ================================================ alabaster==0.7.12 \ --hash=sha256:446438bdcca0e05bd45ea2de1668c1d9b032e1a9154c2c259092d77031ddd359 \ --hash=sha256:a661d72d58e6ea8a57f7a86e37d86716863ee5e92788398526d58b26a4e4dc02 Babel==2.8.0 \ --hash=sha256:1aac2ae2d0d8ea368fa90906567f5c08463d98ade155c0c4bfedd6a0f7160e38 \ --hash=sha256:d670ea0b10f8b723672d3a6abeb87b565b244da220d76b4dba1b66269ec152d4 certifi==2020.6.20 \ --hash=sha256:5930595817496dd21bb8dc35dad090f1c2cd0adfaf21204bf6732ca5d8ee34d3 \ --hash=sha256:8fc0819f1f30ba15bdb34cceffb9ef04d99f420f68eb75d901e9560b8749fc41 chardet==3.0.4 \ --hash=sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae \ --hash=sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691 docutils==0.16 \ --hash=sha256:0c5b78adfbf7762415433f5515cd5c9e762339e23369dbe8000d84a4bf4ab3af \ --hash=sha256:c2de3a60e9e7d07be26b7f2b00ca0309c207e06c100f9cc2a94931fc75a478fc gitdb==4.0.5 \ --hash=sha256:91f36bfb1ab7949b3b40e23736db18231bf7593edada2ba5c3a174a7b23657ac \ --hash=sha256:c9e1f2d0db7ddb9a704c2a0217be31214e91a4fe1dea1efad19ae42ba0c285c9 GitPython==3.1.8 \ --hash=sha256:080bf8e2cf1a2b907634761c2eaefbe83b69930c94c66ad11b65a8252959f912 \ --hash=sha256:1858f4fd089abe92ae465f01d5aaaf55e937eca565fb2c1fce35a51b5f85c910 idna==2.10 \ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \ --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0 imagesize==1.2.0 \ --hash=sha256:6965f19a6a2039c7d48bca7dba2473069ff854c36ae6f19d2cde309d998228a1 \ --hash=sha256:b1f6b5a4eab1f73479a50fb79fcf729514a900c341d8503d62a62dbc4127a2b1 Jinja2==2.11.2 \ --hash=sha256:89aab215427ef59c34ad58735269eb58b1a5808103067f7bb9d5836c651b3bb0 \ --hash=sha256:f0a4641d3cf955324a89c04f3d94663aa4d638abe8f733ecd3582848e1c37035 
MarkupSafe==1.1.1 \ --hash=sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473 \ --hash=sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161 \ --hash=sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235 \ --hash=sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5 \ --hash=sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42 \ --hash=sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff \ --hash=sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b \ --hash=sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1 \ --hash=sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e \ --hash=sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183 \ --hash=sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66 \ --hash=sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b \ --hash=sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1 \ --hash=sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15 \ --hash=sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1 \ --hash=sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e \ --hash=sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b \ --hash=sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905 \ --hash=sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735 \ --hash=sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d \ --hash=sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e \ --hash=sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d \ --hash=sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c \ --hash=sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21 \ 
--hash=sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2 \ --hash=sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5 \ --hash=sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b \ --hash=sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6 \ --hash=sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f \ --hash=sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f \ --hash=sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2 \ --hash=sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7 \ --hash=sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be packaging==20.4 \ --hash=sha256:4357f74f47b9c12db93624a82154e9b120fa8293699949152b22065d556079f8 \ --hash=sha256:998416ba6962ae7fbd6596850b80e17859a5753ba17c32284f67bfff33784181 Pygments==2.7.1 \ --hash=sha256:307543fe65c0947b126e83dd5a61bd8acbd84abec11f43caebaf5534cbc17998 \ --hash=sha256:926c3f319eda178d1bd90851e4317e6d8cdb5e292a3386aac9bd75eca29cf9c7 pyparsing==2.4.7 \ --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \ --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b pytz==2020.1 \ --hash=sha256:a494d53b6d39c3c6e44c3bec237336e14305e4f29bbf800b599253057fbb79ed \ --hash=sha256:c35965d010ce31b23eeb663ed3cc8c906275d6be1a34393a1d73a41febf4a048 requests==2.24.0 \ --hash=sha256:b3559a131db72c33ee969480840fff4bb6dd111de7dd27c8ee1f820f4f00231b \ --hash=sha256:fe75cc94a9443b9246fc7049224f75604b113c36acb93f87b80ed42c44cbb898 six==1.15.0 \ --hash=sha256:30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259 \ --hash=sha256:8b74bedcbbbaca38ff6d7491d76f2b06b3592611af620f8426e82dddb04a5ced smmap==3.0.4 \ --hash=sha256:54c44c197c819d5ef1991799a7e30b662d1e520f2ac75c9efbeb54a742214cf4 \ --hash=sha256:9c98bbd1f9786d22f14b3d4126894d56befb835ec90cef151af566c7e19b5d24 snowballstemmer==2.0.0 \ 
--hash=sha256:209f257d7533fdb3cb73bdbd24f436239ca3b2fa67d56f6ff88e86be08cc5ef0 \ --hash=sha256:df3bac3df4c2c01363f3dd2cfa78cce2840a79b9f1c2d2de9ce8d31683992f52 Sphinx==3.2.1 \ --hash=sha256:321d6d9b16fa381a5306e5a0b76cd48ffbc588e6340059a729c6fdd66087e0e8 \ --hash=sha256:ce6fd7ff5b215af39e2fcd44d4a321f6694b4530b6f2b2109b64d120773faea0 sphinx-copybutton==0.3.0 \ --hash=sha256:4becad3a1e7c50211f1477e34fd4b6d027680e1612f497cb5b88cf85bccddaaa \ --hash=sha256:4cd06afd0588aa43eba968bfc6105e1ec6546c50a51f880af1d89afaebc6fb58 sphinx-rtd-theme==0.5.0 \ --hash=sha256:22c795ba2832a169ca301cd0a083f7a434e09c538c70beb42782c073651b707d \ --hash=sha256:373413d0f82425aaa28fb288009bf0d0964711d347763af2f1b65cafcb028c82 sphinx-tabs==1.3.0 \ --hash=sha256:537857f91f1b371f7b45eb8ac83001618b3e3178c78df073d2cc4558a8e66ef5 \ --hash=sha256:54132c8a57aa19bba6e17fe26eb94ea9df531708ff3f509b119313b32d0d5aff sphinxcontrib-applehelp==1.0.2 \ --hash=sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a \ --hash=sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58 sphinxcontrib-devhelp==1.0.2 \ --hash=sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e \ --hash=sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4 sphinxcontrib-htmlhelp==1.0.3 \ --hash=sha256:3c0bc24a2c41e340ac37c85ced6dafc879ab485c095b1d65d2461ac2f7cca86f \ --hash=sha256:e8f5bb7e31b2dbb25b9cc435c8ab7a79787ebf7f906155729338f3156d93659b sphinxcontrib-httpdomain==1.7.0 \ --hash=sha256:1fb5375007d70bf180cdd1c79e741082be7aa2d37ba99efe561e1c2e3f38191e \ --hash=sha256:ac40b4fba58c76b073b03931c7b8ead611066a6aebccafb34dc19694f4eb6335 sphinxcontrib-jsmath==1.0.1 \ --hash=sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178 \ --hash=sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8 sphinxcontrib-qthelp==1.0.3 \ --hash=sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72 \ 
--hash=sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6 sphinxcontrib-serializinghtml==1.1.4 \ --hash=sha256:eaa0eccc86e982a9b939b2b82d12cc5d013385ba5eadcc7e4fed23f4405f77bc \ --hash=sha256:f242a81d423f59617a8e5cf16f5d4d74e28ee9a66f9e5b637a18082991db5a9a urllib3==1.25.10 \ --hash=sha256:91056c15fa70756691db97756772bb1eb9678fa585d9184f24534b100dc60f4a \ --hash=sha256:e7983572181f5e1522d9c98453462384ee92a0be7fac5f1413a1e35c56cc0461 ================================================ FILE: docs/root/_static/css/envoy.css ================================================ @import url("theme.css"); /* Splits a long line descriptions in tables in to multiple lines */ .wy-table-responsive table td, .wy-table-responsive table th { white-space: normal !important; } /* align multi line csv table columns */ table.docutils div.line-block { margin-left: 0; } /* Breaking long words */ .wy-nav-content { overflow-wrap: break-word; max-width: 1000px; } /* To style the API version label of a search result item */ .api-version-label { border-radius: 20%; background-color: #c0c0c0; color: #ffffff; margin-left: 4px; padding: 4px; } ================================================ FILE: docs/root/_static/placeholder ================================================ ================================================ FILE: docs/root/_static/searchtools.js ================================================ /* * searchtools.js * ~~~~~~~~~~~~~~~~ * * Sphinx JavaScript utilities for the full-text search. * * :copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS. * :license: BSD, see LICENSE for details. * */ // Modified from https://raw.githubusercontent.com/sphinx-doc/sphinx/3.x/sphinx/themes/basic/static/searchtools.js // to have renderApiVersionLabel to render the API version for each search result item. if (!Scorer) { /** * Simple result scoring code. 
*/ var Scorer = { // Implement the following function to further tweak the score for each result // The function takes a result array [filename, title, anchor, descr, score] // and returns the new score. /* score: function(result) { return result[4]; }, */ // query matches the full name of an object objNameMatch: 11, // or matches in the last dotted part of the object name objPartialMatch: 6, // Additive scores depending on the priority of the object objPrio: { 0: 15, // used to be importantResults 1: 5, // used to be objectResults 2: -5, }, // used to be unimportantResults // Used when the priority is not in the mapping. objPrioDefault: 0, // query found in title title: 15, partialTitle: 7, // query found in terms term: 5, partialTerm: 2, }; } if (!splitQuery) { function splitQuery(query) { return query.split(/\s+/); } } /** * Search Module */ var Search = { _index: null, _queued_query: null, _pulse_status: -1, htmlToText: function (htmlString) { var htmlElement = document.createElement("span"); htmlElement.innerHTML = htmlString; $(htmlElement).find(".headerlink").remove(); docContent = $(htmlElement).find("[role=main]")[0]; if (docContent === undefined) { console.warn( "Content block not found. Sphinx search tries to obtain it " + "via '[role=main]'. Could you check your theme or template." 
); return ""; } return docContent.textContent || docContent.innerText; }, init: function () { var params = $.getQueryParameters(); if (params.q) { var query = params.q[0]; $('input[name="q"]')[0].value = query; this.performSearch(query); } }, loadIndex: function (url) { $.ajax({ type: "GET", url: url, data: null, dataType: "script", cache: true, complete: function (jqxhr, textstatus) { if (textstatus != "success") { document.getElementById("searchindexloader").src = url; } }, }); }, setIndex: function (index) { var q; this._index = index; if ((q = this._queued_query) !== null) { this._queued_query = null; Search.query(q); } }, hasIndex: function () { return this._index !== null; }, deferQuery: function (query) { this._queued_query = query; }, stopPulse: function () { this._pulse_status = 0; }, startPulse: function () { if (this._pulse_status >= 0) return; function pulse() { var i; Search._pulse_status = (Search._pulse_status + 1) % 4; var dotString = ""; for (i = 0; i < Search._pulse_status; i++) dotString += "."; Search.dots.text(dotString); if (Search._pulse_status > -1) window.setTimeout(pulse, 500); } pulse(); }, /** * perform a search for something (or wait until index is loaded) */ performSearch: function (query) { // create the required interface elements this.out = $("#search-results"); this.title = $("

" + _("Searching") + "

").appendTo(this.out); this.dots = $("").appendTo(this.title); this.status = $('

 

').appendTo(this.out); this.output = $('